/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif
#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
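/*
 * Illustrative usage sketch (not part of this file): a driver that needs
 * the raw PM registers would typically locate the capability first and
 * then read relative to the returned offset; "pdev" and "pmc" are
 * hypothetical driver-local names.
 *
 *	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pos)
 *		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
 */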
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
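/*
 * Illustrative sketch (not part of this file): extended capabilities live
 * above the first 256 bytes of config space, so the same lookup pattern
 * applies, e.g. to locate AER; "pdev" and "status" are hypothetical.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 status;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &status);
 */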
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}
static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->run_wake)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}
/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}
/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
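/*
 * Illustrative sketch (not part of this file): a legacy driver suspend
 * path usually drops the device to D3hot and brings it back to D0 on
 * resume, pairing the transition with a config-space save/restore;
 * "pdev" is a hypothetical device pointer.
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */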
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
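/*
 * Illustrative sketch (not part of this file): pci_save_state() and
 * pci_restore_state() are meant to be used as a pair around a power
 * transition, and restore is a no-op unless state was saved first;
 * "pdev" is hypothetical.
 *
 *	pci_save_state(pdev);
 *	... device loses power or is reset ...
 *	pci_restore_state(pdev);	/+ restores PCIe regs, BARs, MSI, IOV +/
 */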
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
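/*
 * Illustrative sketch (not part of this file): pci_store_saved_state() and
 * pci_load_and_free_saved_state() let a caller capture a snapshot of the
 * saved state and re-apply it much later; "pdev" and "saved" are
 * hypothetical names.
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */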
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
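/*
 * Illustrative sketch (not part of this file): the classic unmanaged probe
 * sequence built on the helper above; the "foo" names are hypothetical and
 * error handling is trimmed to the enable/request pairing.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			pci_disable_device(pdev);
 *		return rc;
 *	}
 */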
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
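/*
 * Illustrative sketch (not part of this file): with the managed variant the
 * disable happens automatically on driver detach, so the probe error path
 * collapses; the "foo" names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		return pcim_iomap_regions(pdev, 1 << 0, "foo");
 *	}
 */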
/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If bridge is in low power state, the
				 * configuration space of subordinate devices
				 * may not be accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep.  The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set.  The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
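/*
 * Illustrative sketch (not part of this file): a network driver enabling
 * Wake-on-LAN typically arms PME# via the helper above and then lets the
 * core pick the sleep state; "pdev", "wol_enabled" and "state" are
 * hypothetical driver-local names.
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */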
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		__pci_enable_wake(dev, target_state, true, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}
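/*
 * Illustrative sketch (not part of this file): config space accesses that
 * may run while the device is runtime-suspended (e.g. sysfs show/store
 * paths) should be bracketed with the helpers above; "pdev" and "val" are
 * hypothetical.
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	pci_config_pm_runtime_put(pdev);
 */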
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}
}
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 1;
	} else {
		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 0;
	}
}
/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);

/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * should be brought out of L0s or L1 to send the message.  It should be
 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
	u32 cap;
	u16 ctrl;
	int ret;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_OBFF_MASK))
		return -ENOTSUPP; /* no OBFF support at all */

	/* Make sure the topology supports OBFF as well */
	if (dev->bus->self) {
		ret = pci_enable_obff(dev->bus->self, type);
		if (ret)
			return ret;
	}

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
	if (cap & PCI_EXP_OBFF_WAKE)
		ctrl |= PCI_EXP_OBFF_WAKE_EN;
	else {
		switch (type) {
		case PCI_EXP_OBFF_SIGNAL_L0:
			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
				ctrl |= PCI_EXP_OBFF_MSGA_EN;
			break;
		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
			ctrl |= PCI_EXP_OBFF_MSGB_EN;
			break;
		default:
			WARN(1, "bad OBFF signal type\n");
			return -ENOTSUPP;
		}
	}
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_obff);

/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
}
EXPORT_SYMBOL(pci_disable_obff);
/**
 * pci_ltr_supported - check whether a device supports LTR
 * @dev: PCI device
 *
 * RETURNS:
 * True if @dev supports latency tolerance reporting, false otherwise.
 */
static bool pci_ltr_supported(struct pci_dev *dev)
{
	u32 cap;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);

	return cap & PCI_EXP_DEVCAP2_LTR;
}

/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int ret;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);

/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	if (!pci_ltr_supported(dev))
		return;

	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);

static int __pci_ltr_scale(int *val)
{
	int scale = 0;

	while (*val > 1023) {
		*val = (*val + 31) / 32;
		scale++;
	}
	return scale;
}
2274 * pci_set_ltr - set LTR latency values
2276 * @snoop_lat_ns: snoop latency in nanoseconds
2277 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2279 * Figure out the scale and set the LTR values accordingly.
2281 int pci_set_ltr(struct pci_dev
*dev
, int snoop_lat_ns
, int nosnoop_lat_ns
)
2283 int pos
, ret
, snoop_scale
, nosnoop_scale
;
2286 if (!pci_ltr_supported(dev
))
2289 snoop_scale
= __pci_ltr_scale(&snoop_lat_ns
);
2290 nosnoop_scale
= __pci_ltr_scale(&nosnoop_lat_ns
);
2292 if (snoop_lat_ns
> PCI_LTR_VALUE_MASK
||
2293 nosnoop_lat_ns
> PCI_LTR_VALUE_MASK
)
2296 if ((snoop_scale
> (PCI_LTR_SCALE_MASK
>> PCI_LTR_SCALE_SHIFT
)) ||
2297 (nosnoop_scale
> (PCI_LTR_SCALE_MASK
>> PCI_LTR_SCALE_SHIFT
)))
2300 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_LTR
);
2304 val
= (snoop_scale
<< PCI_LTR_SCALE_SHIFT
) | snoop_lat_ns
;
2305 ret
= pci_write_config_word(dev
, pos
+ PCI_LTR_MAX_SNOOP_LAT
, val
);
2309 val
= (nosnoop_scale
<< PCI_LTR_SCALE_SHIFT
) | nosnoop_lat_ns
;
2310 ret
= pci_write_config_word(dev
, pos
+ PCI_LTR_MAX_NOSNOOP_LAT
, val
);
2316 EXPORT_SYMBOL(pci_set_ltr
);
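
/*
 * Usage sketch (illustrative): a hypothetical driver whose device tolerates
 * roughly 3 us of snoop latency and 1 us of no-snoop latency could program
 * LTR after enabling it along the upstream path:
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 3000, 1000);
 *
 * Values are in nanoseconds; __pci_ltr_scale() above reduces them to the
 * (value, scale) pair the LTR capability expects, e.g. 3000 ns becomes a
 * value of 94 at a 32 ns scale (94 * 32 = 3008 ns).
 */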
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos, ret;
	u16 ctrl;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	if (!pci_is_pcie(pdev))
		return false;

	/* Filter out flags not applicable to multifunction */
	if (pdev->multifunction)
		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
			      PCI_ACS_EC | PCI_ACS_DT);

	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pdev->multifunction) {
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return false;

		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
		if ((ctrl & acs_flags) != acs_flags)
			return false;
	}

	return true;
}

/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
{
	struct pci_dev *pdev, *parent = start;

	do {
		pdev = parent;

		if (!pci_acs_enabled(pdev, acs_flags))
			return false;

		if (pci_is_root_bus(pdev->bus))
			return (end == NULL);

		parent = pdev->bus->self;
	} while (pdev != end);

	return true;
}
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}

int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
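
/*
 * Worked example (illustrative): a device in slot 3 behind one bridge that
 * asserts INTB (pin 2) appears on the parent bus as
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA of the bridge's own slot.
 * Architecture IRQ routing code typically feeds the pin returned through
 * *pinp by pci_common_swizzle(), together with the root-bus slot number it
 * returns, into its interrupt routing tables.
 */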
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
								int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}

/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}

static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}

/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}

/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}

/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
	return str;
}

/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;

	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
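
/*
 * Usage sketch (illustrative): drivers that perform DMA call
 * pci_set_master(pdev) once the device is otherwise set up, and
 * pci_clear_master(pdev) before releasing it.  Clearing the Bus Master bit
 * stops the device from issuing new DMA or MSI transactions, which is also
 * why pci_reset_function() below clears it while quiescing the device.
 */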
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */
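
/*
 * Usage sketch (illustrative): MWI is a best-effort optimization on
 * conventional PCI, so callers normally use the "try" variant and ignore
 * the result:
 *
 *	pci_try_set_mwi(pdev);
 *
 * This is harmless when the device or arch lacks support; pci_set_mwi() is
 * for the rare caller that must know whether the cache line size could
 * actually be programmed.
 */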
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	if (dev->broken_intx_masking)
		return false;

	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);

static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}

/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);

/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
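
/*
 * Usage sketch (illustrative): a driver on a shared INTx line (for example
 * a UIO-style driver) can use the pair above from its interrupt handler,
 * assuming pci_intx_mask_supported() succeeded at probe time.  "foo_irq"
 * below is a hypothetical handler, not part of this file:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;	the interrupt wasn't ours
 *		... acknowledge the device, later re-enable the line with
 *		... pci_check_and_unmask_intx(pdev)
 *		return IRQ_HANDLED;
 *	}
 */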
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);

int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);

static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	u32 cap;
	u16 status;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
	msleep(100);

	return 0;
}

static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}

/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
/**
 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
{
	u16 ctrl;

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	/* Hold reset asserted briefly, per the PCI spec minimum */
	msleep(2);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	/* Allow devices on the secondary bus to settle after reset */
	ssleep(1);
}
EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);

static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_reset_bridge_secondary_bus(dev->bus->self);

	return 0;
}

static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
{
	int rc = -ENOTTY;

	if (!hotplug || !try_module_get(hotplug->ops->owner))
		return rc;

	if (hotplug->ops->reset_slot)
		rc = hotplug->ops->reset_slot(hotplug, probe);

	module_put(hotplug->ops->owner);

	return rc;
}

static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
	struct pci_dev *pdev;

	if (dev->subordinate || !dev->slot)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev && pdev->slot == dev->slot)
			return -ENOTTY;

	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}

static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_dev_reset_slot_function(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}

static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}

/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);

/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);

/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}

/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
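
/*
 * Usage sketch (illustrative): device-assignment code such as VFIO first
 * probes whether a reset method exists, then performs it:
 *
 *	if (!pci_probe_reset_function(pdev))
 *		pci_reset_function(pdev);
 *
 * pci_reset_function() saves and restores config space around the reset;
 * __pci_reset_function_locked() exists for callers that already hold the
 * device mutex.
 */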
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);

/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count; some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);
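
/*
 * Usage sketch (illustrative): a hypothetical network driver that knows its
 * DMA engine performs best with 512-byte read requests might call
 *
 *	pcie_set_readrq(pdev, 512);
 *
 * The encoding is log2-based: (ffs(512) - 8) << 12 = 2 << 12, i.e. the
 * "512 bytes" setting of the Max_Read_Request_Size field in Device Control.
 */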
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 *    or appropriate error value.
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
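
/*
 * Usage sketch (illustrative): the same string accepted on the command line
 * as pci=resource_alignment= can also be written to the bus attribute
 * created above, e.g.
 *
 *	echo 12@0000:03:00.0 > /sys/bus/pci/resource_alignment
 *
 * which requests 2^12 (4 KiB) alignment for the memory BARs of device
 * 0000:03:00.0; omitting the "12@" prefix falls back to page alignment.
 */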
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);