/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
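
/*
 * Editor's usage sketch (not part of the original file): a caller that
 * needs to know the bus range claimed by a hierarchy, e.g. for a
 * hypothetical root bus pointer "root", might do:
 *
 *	unsigned char last = pci_bus_max_busnr(root);
 *	(bus numbers root->number .. last belong to this subtree)
 */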

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
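
/*
 * Editor's usage sketch (not part of the original file): in a driver's
 * probe path, BAR 0 could be mapped and later unmapped like this, where
 * "pdev" is the probed device and "regs" a hypothetical register base:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */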

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
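
/*
 * Editor's usage sketch (not part of the original file): capabilities
 * that may appear more than once, such as the vendor-specific one, can
 * be enumerated by chaining pci_find_capability() with
 * pci_find_next_capability():
 *
 *	int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
 *	while (pos) {
 *		... handle the capability at "pos" ...
 *		pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR);
 *	}
 */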

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
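
/*
 * Editor's usage sketch (not part of the original file): a driver that
 * needs the PCI Express capability offset, e.g. to read link status
 * directly from config space, might do:
 *
 *	u16 lnksta;
 *	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 *	if (pos)
 *		pci_read_config_word(dev, pos + PCI_EXP_LNKSTA, &lnksta);
 */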

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
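
/*
 * Editor's usage sketch (not part of the original file): walking every
 * instance of an extended capability, here the vendor-specific one:
 *
 *	int pos = 0;
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		... handle the VSEC at "pos" ...
 */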

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
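
/*
 * Editor's usage sketch (not part of the original file): per the NB
 * above, iteration over Hypertransport capabilities should be bounded
 * so a broken device cannot cause an endless loop:
 *
 *	int guard = 48;		(mirrors PCI_FIND_CAP_TTL)
 *	int pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 *	while (pos && guard--) {
 *		... handle the capability at "pos" ...
 *		pos = pci_find_next_ht_capability(dev, pos,
 *						  HT_CAPTYPE_MSI_MAPPING);
 *	}
 */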

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
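
/*
 * Editor's usage sketch (not part of the original file): for a PCIe
 * device, waiting for the Transaction Pending bit to clear before a
 * reset could look like:
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		dev_warn(&dev->dev, "transactions still pending\n");
 */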

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
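
/*
 * Editor's usage sketch (not part of the original file): a driver's
 * suspend path typically saves config state and then drops the device
 * into a low power state:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */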

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
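
/*
 * Editor's usage sketch (not part of the original file): a legacy
 * .suspend() handler ("foo_suspend" is hypothetical) commonly pairs
 * this with pci_set_power_state():
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 */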

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
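
/*
 * Editor's usage sketch (not part of the original file):
 * pci_save_state() and pci_restore_state() are normally paired across a
 * suspend/resume cycle:
 *
 *	suspend:  pci_save_state(pdev);
 *	          pci_set_power_state(pdev, PCI_D3hot);
 *	resume:   pci_set_power_state(pdev, PCI_D0);
 *	          pci_restore_state(pdev);
 */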

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device's saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
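
/*
 * Editor's usage sketch (not part of the original file): callers that
 * reset a device and later need its pre-reset state (e.g. device
 * assignment code) can snapshot and reapply it:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	... reset the device ...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */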

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
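
/*
 * Editor's usage sketch (not part of the original file): a typical
 * probe-time sequence built on these helpers:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 *	...
 *	(on teardown: pci_disable_device(pdev);)
 */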

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
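
/*
 * Editor's usage sketch (not part of the original file): with the
 * managed variant, no explicit pci_disable_device() is needed; devres
 * undoes the enable when the driver detaches:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */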
9ac7849e
TH
1441
1442/**
1443 * pcim_pin_device - Pin managed PCI device
1444 * @pdev: PCI device to pin
1445 *
1446 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1447 * driver detach. @pdev must have been enabled with
1448 * pcim_enable_device().
1449 */
1450void pcim_pin_device(struct pci_dev *pdev)
1451{
1452 struct pci_devres *dr;
1453
1454 dr = find_pci_dr(pdev);
7f375f32 1455 WARN_ON(!dr || !dr->enabled);
9ac7849e 1456 if (dr)
7f375f32 1457 dr->pinned = 1;
9ac7849e 1458}
b7fe9434 1459EXPORT_SYMBOL(pcim_pin_device);
9ac7849e 1460
eca0d467
MG
1461/*
1462 * pcibios_add_device - provide arch specific hooks when adding device dev
1463 * @dev: the PCI device being added
1464 *
1465 * Permits the platform to provide architecture specific functionality when
1466 * devices are added. This is the default implementation. Architecture
1467 * implementations can override this.
1468 */
3c78bc61 1469int __weak pcibios_add_device(struct pci_dev *dev)
eca0d467
MG
1470{
1471 return 0;
1472}
1473
6ae32c53
SO
1474/**
1475 * pcibios_release_device - provide arch specific hooks when releasing device dev
1476 * @dev: the PCI device being released
1477 *
1478 * Permits the platform to provide architecture specific functionality when
1479 * devices are released. This is the default implementation. Architecture
1480 * implementations can override this.
1481 */
1482void __weak pcibios_release_device(struct pci_dev *dev) {}
1483
1da177e4
LT
1484/**
1485 * pcibios_disable_device - disable arch specific PCI resources for device dev
1486 * @dev: the PCI device to disable
1487 *
1488 * Disables architecture specific PCI resources for the device. This
1489 * is the default implementation. Architecture implementations can
1490 * override this.
1491 */
d6d88c83 1492void __weak pcibios_disable_device (struct pci_dev *dev) {}
1da177e4 1493
a43ae58c
HG
1494/**
1495 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1496 * @irq: ISA IRQ to penalize
1497 * @active: IRQ active or not
1498 *
1499 * Permits the platform to provide architecture-specific functionality when
1500 * penalizing ISA IRQs. This is the default implementation. Architecture
1501 * implementations can override this.
1502 */
1503void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1504
fa58d305
RW
1505static void do_pci_disable_device(struct pci_dev *dev)
1506{
1507 u16 pci_command;
1508
1509 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1510 if (pci_command & PCI_COMMAND_MASTER) {
1511 pci_command &= ~PCI_COMMAND_MASTER;
1512 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1513 }
1514
1515 pcibios_disable_device(dev);
1516}
1517
1518/**
1519 * pci_disable_enabled_device - Disable device without updating enable_cnt
1520 * @dev: PCI device to disable
1521 *
1522 * NOTE: This function is a backend of PCI power management routines and is
1523 * not supposed to be called drivers.
1524 */
1525void pci_disable_enabled_device(struct pci_dev *dev)
1526{
296ccb08 1527 if (pci_is_enabled(dev))
fa58d305
RW
1528 do_pci_disable_device(dev);
1529}
1530
1da177e4
LT
1531/**
1532 * pci_disable_device - Disable PCI device after use
1533 * @dev: PCI device to be disabled
1534 *
1535 * Signal to the system that the PCI device is not in use by the system
1536 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1537 *
1538 * Note we don't actually disable the device until all callers of
ee6583f6 1539 * pci_enable_device() have called pci_disable_device().
1da177e4 1540 */
3c78bc61 1541void pci_disable_device(struct pci_dev *dev)
1da177e4 1542{
9ac7849e 1543 struct pci_devres *dr;
99dc804d 1544
9ac7849e
TH
1545 dr = find_pci_dr(dev);
1546 if (dr)
7f375f32 1547 dr->enabled = 0;
9ac7849e 1548
fd6dceab
KK
1549 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1550 "disabling already-disabled device");
1551
cc7ba39b 1552 if (atomic_dec_return(&dev->enable_cnt) != 0)
bae94d02
IPG
1553 return;
1554
fa58d305 1555 do_pci_disable_device(dev);
1da177e4 1556
fa58d305 1557 dev->is_busmaster = 0;
1da177e4 1558}
b7fe9434 1559EXPORT_SYMBOL(pci_disable_device);
1da177e4 1560
f7bdd12d
BK
1561/**
1562 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1563 * @dev: the PCIe device reset
f7bdd12d
BK
1564 * @state: Reset state to enter into
1565 *
1566 *
45e829ea 1567 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1568 * implementation. Architecture implementations can override this.
1569 */
d6d88c83
BH
1570int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1571 enum pcie_reset_state state)
f7bdd12d
BK
1572{
1573 return -EINVAL;
1574}
1575
1576/**
1577 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1578 * @dev: the PCIe device reset
f7bdd12d
BK
1579 * @state: Reset state to enter into
1580 *
1581 *
1582 * Sets the PCI reset state for the device.
1583 */
1584int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1585{
1586 return pcibios_set_pcie_reset_state(dev, state);
1587}
b7fe9434 1588EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
f7bdd12d 1589
58ff4633
RW
1590/**
1591 * pci_check_pme_status - Check if given device has generated PME.
1592 * @dev: Device to check.
1593 *
1594 * Check the PME status of the device and if set, clear it and clear PME enable
1595 * (if set). Return 'true' if PME status and PME enable were both set or
1596 * 'false' otherwise.
1597 */
1598bool pci_check_pme_status(struct pci_dev *dev)
1599{
1600 int pmcsr_pos;
1601 u16 pmcsr;
1602 bool ret = false;
1603
1604 if (!dev->pm_cap)
1605 return false;
1606
1607 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1608 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1609 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1610 return false;
1611
1612 /* Clear PME status. */
1613 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1614 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1615 /* Disable PME to avoid interrupt flood. */
1616 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1617 ret = true;
1618 }
1619
1620 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1621
1622 return ret;
1623}
1624
b67ea761
RW
1625/**
1626 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1627 * @dev: Device to handle.
379021d5 1628 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1629 *
1630 * Check if @dev has generated PME and queue a resume request for it in that
1631 * case.
1632 */
379021d5 1633static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1634{
379021d5
RW
1635 if (pme_poll_reset && dev->pme_poll)
1636 dev->pme_poll = false;
1637
c125e96f 1638 if (pci_check_pme_status(dev)) {
c125e96f 1639 pci_wakeup_event(dev);
0f953bf6 1640 pm_request_resume(&dev->dev);
c125e96f 1641 }
b67ea761
RW
1642 return 0;
1643}
1644
1645/**
1646 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1647 * @bus: Top bus of the subtree to walk.
1648 */
1649void pci_pme_wakeup_bus(struct pci_bus *bus)
1650{
1651 if (bus)
379021d5 1652 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1653}
1654
448bd857 1655
eb9d0fe4
RW
1656/**
1657 * pci_pme_capable - check the capability of PCI device to generate PME#
1658 * @dev: PCI device to handle.
eb9d0fe4
RW
1659 * @state: PCI state from which device will issue PME#.
1660 */
e5899e1b 1661bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1662{
337001b6 1663 if (!dev->pm_cap)
eb9d0fe4
RW
1664 return false;
1665
337001b6 1666 return !!(dev->pme_support & (1 << state));
eb9d0fe4 1667}
b7fe9434 1668EXPORT_SYMBOL(pci_pme_capable);
eb9d0fe4 1669
df17e62e
MG
1670static void pci_pme_list_scan(struct work_struct *work)
1671{
379021d5 1672 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1673
1674 mutex_lock(&pci_pme_list_mutex);
ce300008
BH
1675 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1676 if (pme_dev->dev->pme_poll) {
1677 struct pci_dev *bridge;
1678
1679 bridge = pme_dev->dev->bus->self;
1680 /*
1681 * If bridge is in low power state, the
1682 * configuration space of subordinate devices
1683 * may be not accessible
1684 */
1685 if (bridge && bridge->current_state != PCI_D0)
1686 continue;
1687 pci_pme_wakeup(pme_dev->dev, NULL);
1688 } else {
1689 list_del(&pme_dev->list);
1690 kfree(pme_dev);
379021d5 1691 }
df17e62e 1692 }
ce300008
BH
1693 if (!list_empty(&pci_pme_list))
1694 schedule_delayed_work(&pci_pme_work,
1695 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1696 mutex_unlock(&pci_pme_list_mutex);
1697}
1698
eb9d0fe4
RW
1699/**
1700 * pci_pme_active - enable or disable PCI device's PME# function
1701 * @dev: PCI device to handle.
eb9d0fe4
RW
1702 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1703 *
1704 * The caller must verify that the device is capable of generating PME# before
1705 * calling this function with @enable equal to 'true'.
1706 */
5a6c9b60 1707void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1708{
1709 u16 pmcsr;
1710
ffaddbe8 1711 if (!dev->pme_support)
eb9d0fe4
RW
1712 return;
1713
337001b6 1714 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1715 /* Clear PME_Status by writing 1 to it and enable PME# */
1716 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1717 if (!enable)
1718 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1719
337001b6 1720 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1721
6e965e0d
HY
1722 /*
1723 * PCI (as opposed to PCIe) PME requires that the device have
1724 * its PME# line hooked up correctly. Not all hardware vendors
1725 * do this, so the PME never gets delivered and the device
1726 * remains asleep. The easiest way around this is to
1727 * periodically walk the list of suspended devices and check
1728 * whether any have their PME flag set. The assumption is that
1729 * we'll wake up often enough anyway that this won't be a huge
1730 * hit, and the power savings from the devices will still be a
1731 * win.
1732 *
 1733 * Although PCIe uses an in-band PME message instead of the PME#
 1734 * line to report PME, PME does not work for some PCIe devices in
1735 * reality. For example, there are devices that set their PME
1736 * status bits, but don't really bother to send a PME message;
1737 * there are PCI Express Root Ports that don't bother to
1738 * trigger interrupts when they receive PME messages from the
1739 * devices below. So PME poll is used for PCIe devices too.
1740 */
df17e62e 1741
379021d5 1742 if (dev->pme_poll) {
df17e62e
MG
1743 struct pci_pme_device *pme_dev;
1744 if (enable) {
1745 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1746 GFP_KERNEL);
0394cb19
BH
1747 if (!pme_dev) {
1748 dev_warn(&dev->dev, "can't enable PME#\n");
1749 return;
1750 }
df17e62e
MG
1751 pme_dev->dev = dev;
1752 mutex_lock(&pci_pme_list_mutex);
1753 list_add(&pme_dev->list, &pci_pme_list);
1754 if (list_is_singular(&pci_pme_list))
1755 schedule_delayed_work(&pci_pme_work,
1756 msecs_to_jiffies(PME_TIMEOUT));
1757 mutex_unlock(&pci_pme_list_mutex);
1758 } else {
1759 mutex_lock(&pci_pme_list_mutex);
1760 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1761 if (pme_dev->dev == dev) {
1762 list_del(&pme_dev->list);
1763 kfree(pme_dev);
1764 break;
1765 }
1766 }
1767 mutex_unlock(&pci_pme_list_mutex);
1768 }
1769 }
1770
85b8582d 1771 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4 1772}
b7fe9434 1773EXPORT_SYMBOL(pci_pme_active);
eb9d0fe4 1774
1da177e4 1775/**
6cbf8214 1776 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1777 * @dev: PCI device affected
1778 * @state: PCI state from which device will issue wakeup events
6cbf8214 1779 * @runtime: True if the events are to be generated at run time
075c1771
DB
1780 * @enable: True to enable event generation; false to disable
1781 *
1782 * This enables the device as a wakeup event source, or disables it.
 1783 * When such events involve platform-specific hooks, those hooks are
1784 * called automatically by this routine.
1785 *
1786 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1787 * always require such platform hooks.
075c1771 1788 *
eb9d0fe4
RW
1789 * RETURN VALUE:
1790 * 0 is returned on success
 1791 * -EINVAL is returned if the device is not supposed to wake up the system
1792 * Error code depending on the platform is returned if both the platform and
1793 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1794 */
6cbf8214
RW
1795int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1796 bool runtime, bool enable)
1da177e4 1797{
5bcc2fb4 1798 int ret = 0;
075c1771 1799
6cbf8214 1800 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1801 return -EINVAL;
1da177e4 1802
e80bb09d
RW
1803 /* Don't do the same thing twice in a row for one device. */
1804 if (!!enable == !!dev->wakeup_prepared)
1805 return 0;
1806
eb9d0fe4
RW
1807 /*
1808 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1809 * Anderson we should be doing PME# wake enable followed by ACPI wake
1810 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1811 */
1da177e4 1812
5bcc2fb4
RW
1813 if (enable) {
1814 int error;
1da177e4 1815
5bcc2fb4
RW
1816 if (pci_pme_capable(dev, state))
1817 pci_pme_active(dev, true);
1818 else
1819 ret = 1;
6cbf8214
RW
1820 error = runtime ? platform_pci_run_wake(dev, true) :
1821 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1822 if (ret)
1823 ret = error;
e80bb09d
RW
1824 if (!ret)
1825 dev->wakeup_prepared = true;
5bcc2fb4 1826 } else {
6cbf8214
RW
1827 if (runtime)
1828 platform_pci_run_wake(dev, false);
1829 else
1830 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1831 pci_pme_active(dev, false);
e80bb09d 1832 dev->wakeup_prepared = false;
5bcc2fb4 1833 }
1da177e4 1834
5bcc2fb4 1835 return ret;
eb9d0fe4 1836}
6cbf8214 1837EXPORT_SYMBOL(__pci_enable_wake);
1da177e4 1838
0235c4fc
RW
1839/**
1840 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1841 * @dev: PCI device to prepare
1842 * @enable: True to enable wake-up event generation; false to disable
1843 *
1844 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1845 * and this function allows them to set that up cleanly - pci_enable_wake()
1846 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1847 * ordering constraints.
1848 *
 1849 * This function only returns an error code if the device is not capable of
1850 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1851 * enable wake-up power for it.
1852 */
1853int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1854{
1855 return pci_pme_capable(dev, PCI_D3cold) ?
1856 pci_enable_wake(dev, PCI_D3cold, enable) :
1857 pci_enable_wake(dev, PCI_D3hot, enable);
1858}
b7fe9434 1859EXPORT_SYMBOL(pci_wake_from_d3);
0235c4fc 1860
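/*
 * Illustrative sketch (not part of pci.c): how a driver's legacy suspend
 * hook might arm wake-up with pci_wake_from_d3().  The function name and
 * the use of device_may_wakeup() as the policy input are assumptions made
 * for the example.
 */
static int example_driver_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Arm PME# (or platform wake-up) only if user space allowed it. */
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}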
404cc2d8 1861/**
37139074
JB
1862 * pci_target_state - find an appropriate low power state for a given PCI dev
1863 * @dev: PCI device
1864 *
1865 * Use underlying platform code to find a supported low power state for @dev.
1866 * If the platform can't manage @dev, return the deepest state from which it
1867 * can generate wake events, based on any available PME info.
404cc2d8 1868 */
0b950f0f 1869static pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1870{
1871 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1872
1873 if (platform_pci_power_manageable(dev)) {
1874 /*
1875 * Call the platform to choose the target state of the device
1876 * and enable wake-up from this state if supported.
1877 */
1878 pci_power_t state = platform_pci_choose_state(dev);
1879
1880 switch (state) {
1881 case PCI_POWER_ERROR:
1882 case PCI_UNKNOWN:
1883 break;
1884 case PCI_D1:
1885 case PCI_D2:
1886 if (pci_no_d1d2(dev))
1887 break;
1888 default:
1889 target_state = state;
404cc2d8 1890 }
d2abdf62
RW
1891 } else if (!dev->pm_cap) {
1892 target_state = PCI_D0;
404cc2d8
RW
1893 } else if (device_may_wakeup(&dev->dev)) {
1894 /*
1895 * Find the deepest state from which the device can generate
1896 * wake-up events, make it the target state and enable device
1897 * to generate PME#.
1898 */
337001b6
RW
1899 if (dev->pme_support) {
1900 while (target_state
1901 && !(dev->pme_support & (1 << target_state)))
1902 target_state--;
404cc2d8
RW
1903 }
1904 }
1905
e5899e1b
RW
1906 return target_state;
1907}
1908
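/*
 * Worked example of the pme_support walk above (values assumed for
 * illustration): a device reporting PME# from D0, D1 and D3hot has
 * pme_support = 0b01011.  Starting from PCI_D3hot (3), bit 3 is set, so
 * the loop exits immediately and D3hot is the target state.  If only D0
 * and D1 were supported (0b00011), the loop would step down 3 -> 2 -> 1
 * and settle on PCI_D1.
 */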
1909/**
1910 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1911 * @dev: Device to handle.
1912 *
1913 * Choose the power state appropriate for the device depending on whether
1914 * it can wake up the system and/or is power manageable by the platform
1915 * (PCI_D3hot is the default) and put the device into that state.
1916 */
1917int pci_prepare_to_sleep(struct pci_dev *dev)
1918{
1919 pci_power_t target_state = pci_target_state(dev);
1920 int error;
1921
1922 if (target_state == PCI_POWER_ERROR)
1923 return -EIO;
1924
8efb8c76 1925 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1926
404cc2d8
RW
1927 error = pci_set_power_state(dev, target_state);
1928
1929 if (error)
1930 pci_enable_wake(dev, target_state, false);
1931
1932 return error;
1933}
b7fe9434 1934EXPORT_SYMBOL(pci_prepare_to_sleep);
404cc2d8
RW
1935
1936/**
443bd1c4 1937 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1938 * @dev: Device to handle.
1939 *
88393161 1940 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1941 */
1942int pci_back_from_sleep(struct pci_dev *dev)
1943{
1944 pci_enable_wake(dev, PCI_D0, false);
1945 return pci_set_power_state(dev, PCI_D0);
1946}
b7fe9434 1947EXPORT_SYMBOL(pci_back_from_sleep);
404cc2d8 1948
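/*
 * Illustrative sketch (not part of pci.c): the typical pairing of the two
 * helpers above in a driver's legacy suspend/resume hooks.  The function
 * names are hypothetical.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Picks a target state, arms wake-up, and enters the state. */
	return pci_prepare_to_sleep(pdev);
}

static int example_resume(struct pci_dev *pdev)
{
	/* Back to D0 with wake-up disarmed, then restore config space. */
	pci_back_from_sleep(pdev);
	pci_restore_state(pdev);
	return 0;
}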
6cbf8214
RW
1949/**
1950 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1951 * @dev: PCI device being suspended.
1952 *
1953 * Prepare @dev to generate wake-up events at run time and put it into a low
1954 * power state.
1955 */
1956int pci_finish_runtime_suspend(struct pci_dev *dev)
1957{
1958 pci_power_t target_state = pci_target_state(dev);
1959 int error;
1960
1961 if (target_state == PCI_POWER_ERROR)
1962 return -EIO;
1963
448bd857
HY
1964 dev->runtime_d3cold = target_state == PCI_D3cold;
1965
6cbf8214
RW
1966 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1967
1968 error = pci_set_power_state(dev, target_state);
1969
448bd857 1970 if (error) {
6cbf8214 1971 __pci_enable_wake(dev, target_state, true, false);
448bd857
HY
1972 dev->runtime_d3cold = false;
1973 }
6cbf8214
RW
1974
1975 return error;
1976}
1977
b67ea761
RW
1978/**
1979 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1980 * @dev: Device to check.
1981 *
f7625980 1982 * Return true if the device itself is capable of generating wake-up events
b67ea761
RW
1983 * (through the platform or using the native PCIe PME) or if the device supports
1984 * PME and one of its upstream bridges can generate wake-up events.
1985 */
1986bool pci_dev_run_wake(struct pci_dev *dev)
1987{
1988 struct pci_bus *bus = dev->bus;
1989
1990 if (device_run_wake(&dev->dev))
1991 return true;
1992
1993 if (!dev->pme_support)
1994 return false;
1995
1996 while (bus->parent) {
1997 struct pci_dev *bridge = bus->self;
1998
1999 if (device_run_wake(&bridge->dev))
2000 return true;
2001
2002 bus = bus->parent;
2003 }
2004
2005 /* We have reached the root bus. */
2006 if (bus->bridge)
2007 return device_run_wake(bus->bridge);
2008
2009 return false;
2010}
2011EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2012
bac2a909
RW
2013/**
2014 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2015 * @pci_dev: Device to check.
2016 *
 2017 * Return 'true' if the device is runtime-suspended, doesn't have to be
 2018 * reconfigured due to a difference in wakeup settings between system and
 2019 * runtime suspend, and its current power state is suitable for the
 2020 * upcoming (system) transition.
2021 */
2022bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2023{
2024 struct device *dev = &pci_dev->dev;
2025
2026 if (!pm_runtime_suspended(dev)
2027 || (device_can_wakeup(dev) && !device_may_wakeup(dev))
2028 || platform_pci_need_resume(pci_dev))
2029 return false;
2030
2031 return pci_target_state(pci_dev) == pci_dev->current_state;
2032}
2033
b3c32c4f
HY
2034void pci_config_pm_runtime_get(struct pci_dev *pdev)
2035{
2036 struct device *dev = &pdev->dev;
2037 struct device *parent = dev->parent;
2038
2039 if (parent)
2040 pm_runtime_get_sync(parent);
2041 pm_runtime_get_noresume(dev);
2042 /*
 2043 * pdev->current_state is set to PCI_D3cold during suspend,
 2044 * so wait until the suspend completes
2045 */
2046 pm_runtime_barrier(dev);
2047 /*
2048 * Only need to resume devices in D3cold, because config
2049 * registers are still accessible for devices suspended but
2050 * not in D3cold.
2051 */
2052 if (pdev->current_state == PCI_D3cold)
2053 pm_runtime_resume(dev);
2054}
2055
2056void pci_config_pm_runtime_put(struct pci_dev *pdev)
2057{
2058 struct device *dev = &pdev->dev;
2059 struct device *parent = dev->parent;
2060
2061 pm_runtime_put(dev);
2062 if (parent)
2063 pm_runtime_put_sync(parent);
2064}
2065
eb9d0fe4
RW
2066/**
2067 * pci_pm_init - Initialize PM functions of given PCI device
2068 * @dev: PCI device to handle.
2069 */
2070void pci_pm_init(struct pci_dev *dev)
2071{
2072 int pm;
2073 u16 pmc;
1da177e4 2074
bb910a70 2075 pm_runtime_forbid(&dev->dev);
967577b0
HY
2076 pm_runtime_set_active(&dev->dev);
2077 pm_runtime_enable(&dev->dev);
a1e4d72c 2078 device_enable_async_suspend(&dev->dev);
e80bb09d 2079 dev->wakeup_prepared = false;
bb910a70 2080
337001b6 2081 dev->pm_cap = 0;
ffaddbe8 2082 dev->pme_support = 0;
337001b6 2083
eb9d0fe4
RW
2084 /* find PCI PM capability in list */
2085 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2086 if (!pm)
50246dd4 2087 return;
eb9d0fe4
RW
2088 /* Check device's ability to generate PME# */
2089 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 2090
eb9d0fe4
RW
2091 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2092 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2093 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 2094 return;
eb9d0fe4
RW
2095 }
2096
337001b6 2097 dev->pm_cap = pm;
1ae861e6 2098 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 2099 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
4f9c1397 2100 dev->d3cold_allowed = true;
337001b6
RW
2101
2102 dev->d1_support = false;
2103 dev->d2_support = false;
2104 if (!pci_no_d1d2(dev)) {
c9ed77ee 2105 if (pmc & PCI_PM_CAP_D1)
337001b6 2106 dev->d1_support = true;
c9ed77ee 2107 if (pmc & PCI_PM_CAP_D2)
337001b6 2108 dev->d2_support = true;
c9ed77ee
BH
2109
2110 if (dev->d1_support || dev->d2_support)
2111 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
2112 dev->d1_support ? " D1" : "",
2113 dev->d2_support ? " D2" : "");
337001b6
RW
2114 }
2115
2116 pmc &= PCI_PM_CAP_PME_MASK;
2117 if (pmc) {
10c3d71d
BH
2118 dev_printk(KERN_DEBUG, &dev->dev,
2119 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
2120 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2121 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2122 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2123 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2124 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 2125 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 2126 dev->pme_poll = true;
eb9d0fe4
RW
2127 /*
2128 * Make device's PM flags reflect the wake-up capability, but
2129 * let the user space enable it to wake up the system as needed.
2130 */
2131 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 2132 /* Disable the PME# generation functionality */
337001b6 2133 pci_pme_active(dev, false);
eb9d0fe4 2134 }
1da177e4
LT
2135}
2136
34a4876e
YL
2137static void pci_add_saved_cap(struct pci_dev *pci_dev,
2138 struct pci_cap_saved_state *new_cap)
2139{
2140 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2141}
2142
63f4898a 2143/**
fd0f7f73
AW
2144 * _pci_add_cap_save_buffer - allocate buffer for saving given
2145 * capability registers
63f4898a
RW
2146 * @dev: the PCI device
2147 * @cap: the capability to allocate the buffer for
fd0f7f73 2148 * @extended: Standard or Extended capability ID
63f4898a
RW
2149 * @size: requested size of the buffer
2150 */
fd0f7f73
AW
2151static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2152 bool extended, unsigned int size)
63f4898a
RW
2153{
2154 int pos;
2155 struct pci_cap_saved_state *save_state;
2156
fd0f7f73
AW
2157 if (extended)
2158 pos = pci_find_ext_capability(dev, cap);
2159 else
2160 pos = pci_find_capability(dev, cap);
2161
63f4898a
RW
2162 if (pos <= 0)
2163 return 0;
2164
2165 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2166 if (!save_state)
2167 return -ENOMEM;
2168
24a4742f 2169 save_state->cap.cap_nr = cap;
fd0f7f73 2170 save_state->cap.cap_extended = extended;
24a4742f 2171 save_state->cap.size = size;
63f4898a
RW
2172 pci_add_saved_cap(dev, save_state);
2173
2174 return 0;
2175}
2176
fd0f7f73
AW
2177int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2178{
2179 return _pci_add_cap_save_buffer(dev, cap, false, size);
2180}
2181
2182int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2183{
2184 return _pci_add_cap_save_buffer(dev, cap, true, size);
2185}
2186
63f4898a
RW
2187/**
2188 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2189 * @dev: the PCI device
2190 */
2191void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2192{
2193 int error;
2194
89858517
YZ
2195 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2196 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
2197 if (error)
2198 dev_err(&dev->dev,
2199 "unable to preallocate PCI Express save buffer\n");
2200
2201 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2202 if (error)
2203 dev_err(&dev->dev,
2204 "unable to preallocate PCI-X save buffer\n");
425c1b22
AW
2205
2206 pci_allocate_vc_save_buffers(dev);
63f4898a
RW
2207}
2208
f796841e
YL
2209void pci_free_cap_save_buffers(struct pci_dev *dev)
2210{
2211 struct pci_cap_saved_state *tmp;
b67bfe0d 2212 struct hlist_node *n;
f796841e 2213
b67bfe0d 2214 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
f796841e
YL
2215 kfree(tmp);
2216}
2217
58c3a727 2218/**
31ab2476 2219 * pci_configure_ari - enable or disable ARI forwarding
58c3a727 2220 * @dev: the PCI device
b0cc6020
YW
2221 *
2222 * If @dev and its upstream bridge both support ARI, enable ARI in the
2223 * bridge. Otherwise, disable ARI in the bridge.
58c3a727 2224 */
31ab2476 2225void pci_configure_ari(struct pci_dev *dev)
58c3a727 2226{
58c3a727 2227 u32 cap;
8113587c 2228 struct pci_dev *bridge;
58c3a727 2229
6748dcc2 2230 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2231 return;
2232
8113587c 2233 bridge = dev->bus->self;
cb97ae34 2234 if (!bridge)
8113587c
ZY
2235 return;
2236
59875ae4 2237 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2238 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2239 return;
2240
b0cc6020
YW
2241 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2242 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2243 PCI_EXP_DEVCTL2_ARI);
2244 bridge->ari_enabled = 1;
2245 } else {
2246 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2247 PCI_EXP_DEVCTL2_ARI);
2248 bridge->ari_enabled = 0;
2249 }
58c3a727
YZ
2250}
2251
5d990b62
CW
2252static int pci_acs_enable;
2253
2254/**
2255 * pci_request_acs - ask for ACS to be enabled if supported
2256 */
2257void pci_request_acs(void)
2258{
2259 pci_acs_enable = 1;
2260}
2261
ae21ee65 2262/**
2c744244 2263 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
ae21ee65
AK
2264 * @dev: the PCI device
2265 */
2c744244 2266static int pci_std_enable_acs(struct pci_dev *dev)
ae21ee65
AK
2267{
2268 int pos;
2269 u16 cap;
2270 u16 ctrl;
2271
ae21ee65
AK
2272 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2273 if (!pos)
2c744244 2274 return -ENODEV;
ae21ee65
AK
2275
2276 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2277 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2278
2279 /* Source Validation */
2280 ctrl |= (cap & PCI_ACS_SV);
2281
2282 /* P2P Request Redirect */
2283 ctrl |= (cap & PCI_ACS_RR);
2284
2285 /* P2P Completion Redirect */
2286 ctrl |= (cap & PCI_ACS_CR);
2287
2288 /* Upstream Forwarding */
2289 ctrl |= (cap & PCI_ACS_UF);
2290
2291 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2c744244
AW
2292
2293 return 0;
2294}
2295
2296/**
 2297 * pci_enable_acs - enable ACS if the hardware supports it
2298 * @dev: the PCI device
2299 */
2300void pci_enable_acs(struct pci_dev *dev)
2301{
2302 if (!pci_acs_enable)
2303 return;
2304
2305 if (!pci_std_enable_acs(dev))
2306 return;
2307
2308 pci_dev_specific_enable_acs(dev);
ae21ee65
AK
2309}
2310
0a67119f
AW
2311static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2312{
2313 int pos;
83db7e0b 2314 u16 cap, ctrl;
0a67119f
AW
2315
2316 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2317 if (!pos)
2318 return false;
2319
83db7e0b
AW
2320 /*
2321 * Except for egress control, capabilities are either required
2322 * or only required if controllable. Features missing from the
2323 * capability field can therefore be assumed as hard-wired enabled.
2324 */
2325 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2326 acs_flags &= (cap | PCI_ACS_EC);
2327
0a67119f
AW
2328 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2329 return (ctrl & acs_flags) == acs_flags;
2330}
2331
ad805758
AW
2332/**
2333 * pci_acs_enabled - test ACS against required flags for a given device
2334 * @pdev: device to test
2335 * @acs_flags: required PCI ACS flags
2336 *
2337 * Return true if the device supports the provided flags. Automatically
2338 * filters out flags that are not implemented on multifunction devices.
0a67119f
AW
2339 *
2340 * Note that this interface checks the effective ACS capabilities of the
2341 * device rather than the actual capabilities. For instance, most single
2342 * function endpoints are not required to support ACS because they have no
2343 * opportunity for peer-to-peer access. We therefore return 'true'
2344 * regardless of whether the device exposes an ACS capability. This makes
2345 * it much easier for callers of this function to ignore the actual type
2346 * or topology of the device when testing ACS support.
ad805758
AW
2347 */
2348bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2349{
0a67119f 2350 int ret;
ad805758
AW
2351
2352 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2353 if (ret >= 0)
2354 return ret > 0;
2355
0a67119f
AW
2356 /*
2357 * Conventional PCI and PCI-X devices never support ACS, either
2358 * effectively or actually. The shared bus topology implies that
2359 * any device on the bus can receive or snoop DMA.
2360 */
ad805758
AW
2361 if (!pci_is_pcie(pdev))
2362 return false;
2363
0a67119f
AW
2364 switch (pci_pcie_type(pdev)) {
2365 /*
2366 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
f7625980 2367 * but since their primary interface is PCI/X, we conservatively
0a67119f
AW
2368 * handle them as we would a non-PCIe device.
2369 */
2370 case PCI_EXP_TYPE_PCIE_BRIDGE:
2371 /*
2372 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2373 * applicable... must never implement an ACS Extended Capability...".
2374 * This seems arbitrary, but we take a conservative interpretation
2375 * of this statement.
2376 */
2377 case PCI_EXP_TYPE_PCI_BRIDGE:
2378 case PCI_EXP_TYPE_RC_EC:
2379 return false;
2380 /*
2381 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2382 * implement ACS in order to indicate their peer-to-peer capabilities,
2383 * regardless of whether they are single- or multi-function devices.
2384 */
2385 case PCI_EXP_TYPE_DOWNSTREAM:
2386 case PCI_EXP_TYPE_ROOT_PORT:
2387 return pci_acs_flags_enabled(pdev, acs_flags);
2388 /*
2389 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2390 * implemented by the remaining PCIe types to indicate peer-to-peer
f7625980 2391 * capabilities, but only when they are part of a multifunction
0a67119f
AW
2392 * device. The footnote for section 6.12 indicates the specific
2393 * PCIe types included here.
2394 */
2395 case PCI_EXP_TYPE_ENDPOINT:
2396 case PCI_EXP_TYPE_UPSTREAM:
2397 case PCI_EXP_TYPE_LEG_END:
2398 case PCI_EXP_TYPE_RC_END:
2399 if (!pdev->multifunction)
2400 break;
2401
0a67119f 2402 return pci_acs_flags_enabled(pdev, acs_flags);
ad805758
AW
2403 }
2404
0a67119f 2405 /*
f7625980 2406 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
0a67119f
AW
2407 * to single function devices with the exception of downstream ports.
2408 */
ad805758
AW
2409 return true;
2410}
2411
2412/**
2413 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2414 * @start: starting downstream device
2415 * @end: ending upstream device or NULL to search to the root bus
2416 * @acs_flags: required flags
2417 *
2418 * Walk up a device tree from start to end testing PCI ACS support. If
2419 * any step along the way does not support the required flags, return false.
2420 */
2421bool pci_acs_path_enabled(struct pci_dev *start,
2422 struct pci_dev *end, u16 acs_flags)
2423{
2424 struct pci_dev *pdev, *parent = start;
2425
2426 do {
2427 pdev = parent;
2428
2429 if (!pci_acs_enabled(pdev, acs_flags))
2430 return false;
2431
2432 if (pci_is_root_bus(pdev->bus))
2433 return (end == NULL);
2434
2435 parent = pdev->bus->self;
2436 } while (pdev != end);
2437
2438 return true;
2439}
2440
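/*
 * Illustrative sketch (not part of pci.c): an IOMMU-group-style check that
 * a device's DMA cannot be redirected peer-to-peer anywhere on the path up
 * to the root bus.  The flag set mirrors what callers such as VFIO
 * typically require; treat it as an example, not as policy.
 */
static bool example_dev_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* NULL end: test every bridge from pdev up to the root bus. */
	return pci_acs_path_enabled(pdev, NULL, flags);
}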
57c2cf71
BH
2441/**
2442 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2443 * @dev: the PCI device
bb5c2de2 2444 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
57c2cf71
BH
2445 *
2446 * Perform INTx swizzling for a device behind one level of bridge. This is
2447 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2448 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2449 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2450 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2451 */
3df425f3 2452u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2453{
46b952a3
MW
2454 int slot;
2455
2456 if (pci_ari_enabled(dev->bus))
2457 slot = 0;
2458 else
2459 slot = PCI_SLOT(dev->devfn);
2460
2461 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2462}
2463
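/*
 * Worked example for the swizzle above (illustrative): a device in slot 2
 * asserting INTB (pin = 2) maps to ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD at
 * the upstream bridge.  Applying this once per bridge level, as
 * pci_common_swizzle() below does, yields the pin seen at the root bus.
 */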
3c78bc61 2464int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1da177e4
LT
2465{
2466 u8 pin;
2467
514d207d 2468 pin = dev->pin;
1da177e4
LT
2469 if (!pin)
2470 return -1;
878f2e50 2471
8784fd4d 2472 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2473 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2474 dev = dev->bus->self;
2475 }
2476 *bridge = dev;
2477 return pin;
2478}
2479
68feac87
BH
2480/**
2481 * pci_common_swizzle - swizzle INTx all the way to root bridge
2482 * @dev: the PCI device
 2483 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2484 *
2485 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2486 * bridges all the way up to a PCI root bus.
2487 */
2488u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2489{
2490 u8 pin = *pinp;
2491
1eb39487 2492 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2493 pin = pci_swizzle_interrupt_pin(dev, pin);
2494 dev = dev->bus->self;
2495 }
2496 *pinp = pin;
2497 return PCI_SLOT(dev->devfn);
2498}
e6b29dea 2499EXPORT_SYMBOL_GPL(pci_common_swizzle);
68feac87 2500
1da177e4
LT
2501/**
 2502 * pci_release_region - Release a PCI BAR
2503 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2504 * @bar: BAR to release
2505 *
2506 * Releases the PCI I/O and memory resources previously reserved by a
2507 * successful call to pci_request_region. Call this function only
2508 * after all use of the PCI regions has ceased.
2509 */
2510void pci_release_region(struct pci_dev *pdev, int bar)
2511{
9ac7849e
TH
2512 struct pci_devres *dr;
2513
1da177e4
LT
2514 if (pci_resource_len(pdev, bar) == 0)
2515 return;
2516 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2517 release_region(pci_resource_start(pdev, bar),
2518 pci_resource_len(pdev, bar));
2519 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2520 release_mem_region(pci_resource_start(pdev, bar),
2521 pci_resource_len(pdev, bar));
9ac7849e
TH
2522
2523 dr = find_pci_dr(pdev);
2524 if (dr)
2525 dr->region_mask &= ~(1 << bar);
1da177e4 2526}
b7fe9434 2527EXPORT_SYMBOL(pci_release_region);
1da177e4
LT
2528
2529/**
f5ddcac4 2530 * __pci_request_region - Reserved PCI I/O and memory resource
1da177e4
LT
2531 * @pdev: PCI device whose resources are to be reserved
2532 * @bar: BAR to be reserved
2533 * @res_name: Name to be associated with resource.
f5ddcac4 2534 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2535 *
 2536 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2537 * being reserved by owner @res_name. Do not access any
2538 * address inside the PCI regions unless this call returns
2539 * successfully.
2540 *
f5ddcac4
RD
2541 * If @exclusive is set, then the region is marked so that userspace
2542 * is explicitly not allowed to map the resource via /dev/mem or
f7625980 2543 * sysfs MMIO access.
f5ddcac4 2544 *
1da177e4
LT
2545 * Returns 0 on success, or %EBUSY on error. A warning
2546 * message is also printed on failure.
2547 */
3c78bc61
RD
2548static int __pci_request_region(struct pci_dev *pdev, int bar,
2549 const char *res_name, int exclusive)
1da177e4 2550{
9ac7849e
TH
2551 struct pci_devres *dr;
2552
1da177e4
LT
2553 if (pci_resource_len(pdev, bar) == 0)
2554 return 0;
f7625980 2555
1da177e4
LT
2556 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2557 if (!request_region(pci_resource_start(pdev, bar),
2558 pci_resource_len(pdev, bar), res_name))
2559 goto err_out;
3c78bc61 2560 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2561 if (!__request_mem_region(pci_resource_start(pdev, bar),
2562 pci_resource_len(pdev, bar), res_name,
2563 exclusive))
1da177e4
LT
2564 goto err_out;
2565 }
9ac7849e
TH
2566
2567 dr = find_pci_dr(pdev);
2568 if (dr)
2569 dr->region_mask |= 1 << bar;
2570
1da177e4
LT
2571 return 0;
2572
2573err_out:
c7dabef8 2574 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2575 &pdev->resource[bar]);
1da177e4
LT
2576 return -EBUSY;
2577}
2578
e8de1481 2579/**
f5ddcac4 2580 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2581 * @pdev: PCI device whose resources are to be reserved
2582 * @bar: BAR to be reserved
f5ddcac4 2583 * @res_name: Name to be associated with resource
e8de1481 2584 *
f5ddcac4 2585 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2586 * being reserved by owner @res_name. Do not access any
2587 * address inside the PCI regions unless this call returns
2588 * successfully.
2589 *
2590 * Returns 0 on success, or %EBUSY on error. A warning
2591 * message is also printed on failure.
2592 */
2593int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2594{
2595 return __pci_request_region(pdev, bar, res_name, 0);
2596}
b7fe9434 2597EXPORT_SYMBOL(pci_request_region);
e8de1481
AV
2598
2599/**
 2600 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2601 * @pdev: PCI device whose resources are to be reserved
2602 * @bar: BAR to be reserved
2603 * @res_name: Name to be associated with resource.
2604 *
 2605 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2606 * being reserved by owner @res_name. Do not access any
2607 * address inside the PCI regions unless this call returns
2608 * successfully.
2609 *
2610 * Returns 0 on success, or %EBUSY on error. A warning
2611 * message is also printed on failure.
2612 *
 2613 * The key difference that _exclusive makes is that userspace is
2614 * explicitly not allowed to map the resource via /dev/mem or
f7625980 2615 * sysfs.
e8de1481 2616 */
3c78bc61
RD
2617int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
2618 const char *res_name)
e8de1481
AV
2619{
2620 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2621}
b7fe9434
RD
2622EXPORT_SYMBOL(pci_request_region_exclusive);
2623
c87deff7
HS
2624/**
2625 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2626 * @pdev: PCI device whose resources were previously reserved
2627 * @bars: Bitmask of BARs to be released
2628 *
2629 * Release selected PCI I/O and memory resources previously reserved.
2630 * Call this function only after all use of the PCI regions has ceased.
2631 */
2632void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2633{
2634 int i;
2635
2636 for (i = 0; i < 6; i++)
2637 if (bars & (1 << i))
2638 pci_release_region(pdev, i);
2639}
b7fe9434 2640EXPORT_SYMBOL(pci_release_selected_regions);
c87deff7 2641
9738abed 2642static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3c78bc61 2643 const char *res_name, int excl)
c87deff7
HS
2644{
2645 int i;
2646
2647 for (i = 0; i < 6; i++)
2648 if (bars & (1 << i))
e8de1481 2649 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2650 goto err_out;
2651 return 0;
2652
2653err_out:
3c78bc61 2654 while (--i >= 0)
c87deff7
HS
2655 if (bars & (1 << i))
2656 pci_release_region(pdev, i);
2657
2658 return -EBUSY;
2659}
1da177e4 2660
e8de1481
AV
2661
2662/**
2663 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2664 * @pdev: PCI device whose resources are to be reserved
2665 * @bars: Bitmask of BARs to be requested
2666 * @res_name: Name to be associated with resource
2667 */
2668int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2669 const char *res_name)
2670{
2671 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2672}
b7fe9434 2673EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 2674
3c78bc61
RD
2675int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
2676 const char *res_name)
e8de1481
AV
2677{
2678 return __pci_request_selected_regions(pdev, bars, res_name,
2679 IORESOURCE_EXCLUSIVE);
2680}
b7fe9434 2681EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
e8de1481 2682
1da177e4
LT
2683/**
2684 * pci_release_regions - Release reserved PCI I/O and memory resources
2685 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2686 *
2687 * Releases all PCI I/O and memory resources previously reserved by a
2688 * successful call to pci_request_regions. Call this function only
2689 * after all use of the PCI regions has ceased.
2690 */
2691
2692void pci_release_regions(struct pci_dev *pdev)
2693{
c87deff7 2694 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4 2695}
b7fe9434 2696EXPORT_SYMBOL(pci_release_regions);
1da177e4
LT
2697
2698/**
 2699 * pci_request_regions - Reserve PCI I/O and memory resources
2700 * @pdev: PCI device whose resources are to be reserved
2701 * @res_name: Name to be associated with resource.
2702 *
2703 * Mark all PCI regions associated with PCI device @pdev as
2704 * being reserved by owner @res_name. Do not access any
2705 * address inside the PCI regions unless this call returns
2706 * successfully.
2707 *
2708 * Returns 0 on success, or %EBUSY on error. A warning
2709 * message is also printed on failure.
2710 */
3c990e92 2711int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2712{
c87deff7 2713 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4 2714}
b7fe9434 2715EXPORT_SYMBOL(pci_request_regions);
1da177e4 2716
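/*
 * Illustrative sketch (not part of pci.c): the canonical probe-time
 * sequence built on the region helpers above.  The driver name, BAR
 * number and error labels are assumptions made for the example.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);	/* map BAR 0 */
	if (!regs) {
		rc = -ENOMEM;
		goto err_release;
	}

	pci_set_master(pdev);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}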
e8de1481
AV
2717/**
 2718 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2719 * @pdev: PCI device whose resources are to be reserved
2720 * @res_name: Name to be associated with resource.
2721 *
2722 * Mark all PCI regions associated with PCI device @pdev as
2723 * being reserved by owner @res_name. Do not access any
2724 * address inside the PCI regions unless this call returns
2725 * successfully.
2726 *
2727 * pci_request_regions_exclusive() will mark the region so that
f7625980 2728 * /dev/mem and the sysfs MMIO access will not be allowed.
e8de1481
AV
2729 *
2730 * Returns 0 on success, or %EBUSY on error. A warning
2731 * message is also printed on failure.
2732 */
2733int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2734{
2735 return pci_request_selected_regions_exclusive(pdev,
2736 ((1 << 6) - 1), res_name);
2737}
b7fe9434 2738EXPORT_SYMBOL(pci_request_regions_exclusive);
e8de1481 2739
8b921acf
LD
2740/**
2741 * pci_remap_iospace - Remap the memory mapped I/O space
2742 * @res: Resource describing the I/O space
2743 * @phys_addr: physical address of range to be mapped
2744 *
2745 * Remap the memory mapped I/O space described by the @res
2746 * and the CPU physical address @phys_addr into virtual address space.
2747 * Only architectures that have memory mapped IO functions defined
2748 * (and the PCI_IOBASE value defined) should call this function.
2749 */
2750int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
2751{
2752#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
2753 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
2754
2755 if (!(res->flags & IORESOURCE_IO))
2756 return -EINVAL;
2757
2758 if (res->end > IO_SPACE_LIMIT)
2759 return -EINVAL;
2760
2761 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
2762 pgprot_device(PAGE_KERNEL));
2763#else
2764 /* this architecture does not have memory mapped I/O space,
2765 so this function should never be called */
2766 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
2767 return -ENODEV;
2768#endif
2769}
2770
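/*
 * Illustrative sketch (not part of pci.c): how a host bridge driver might
 * use pci_remap_iospace() for an I/O window it has discovered.  The
 * resource and CPU physical address are assumptions made for the example.
 */
static int example_map_io_window(struct resource *io_res, phys_addr_t cpu_addr)
{
	/* io_res->start/end are I/O-space offsets below IO_SPACE_LIMIT. */
	return pci_remap_iospace(io_res, cpu_addr);
}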
6a479079
BH
2771static void __pci_set_master(struct pci_dev *dev, bool enable)
2772{
2773 u16 old_cmd, cmd;
2774
2775 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2776 if (enable)
2777 cmd = old_cmd | PCI_COMMAND_MASTER;
2778 else
2779 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2780 if (cmd != old_cmd) {
2781 dev_dbg(&dev->dev, "%s bus mastering\n",
2782 enable ? "enabling" : "disabling");
2783 pci_write_config_word(dev, PCI_COMMAND, cmd);
2784 }
2785 dev->is_busmaster = enable;
2786}
e8de1481 2787
2b6f2c35
MS
2788/**
2789 * pcibios_setup - process "pci=" kernel boot arguments
2790 * @str: string used to pass in "pci=" kernel boot arguments
2791 *
2792 * Process kernel boot arguments. This is the default implementation.
2793 * Architecture specific implementations can override this as necessary.
2794 */
2795char * __weak __init pcibios_setup(char *str)
2796{
2797 return str;
2798}
2799
96c55900
MS
2800/**
2801 * pcibios_set_master - enable PCI bus-mastering for device dev
2802 * @dev: the PCI device to enable
2803 *
2804 * Enables PCI bus-mastering for the device. This is the default
2805 * implementation. Architecture specific implementations can override
2806 * this if necessary.
2807 */
2808void __weak pcibios_set_master(struct pci_dev *dev)
2809{
2810 u8 lat;
2811
f676678f
MS
2812 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2813 if (pci_is_pcie(dev))
2814 return;
2815
96c55900
MS
2816 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2817 if (lat < 16)
2818 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2819 else if (lat > pcibios_max_latency)
2820 lat = pcibios_max_latency;
2821 else
2822 return;
a006482b 2823
96c55900
MS
2824 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2825}
2826
1da177e4
LT
2827/**
2828 * pci_set_master - enables bus-mastering for device dev
2829 * @dev: the PCI device to enable
2830 *
2831 * Enables bus-mastering on the device and calls pcibios_set_master()
2832 * to do the needed arch specific settings.
2833 */
6a479079 2834void pci_set_master(struct pci_dev *dev)
1da177e4 2835{
6a479079 2836 __pci_set_master(dev, true);
1da177e4
LT
2837 pcibios_set_master(dev);
2838}
b7fe9434 2839EXPORT_SYMBOL(pci_set_master);
1da177e4 2840
6a479079
BH
2841/**
2842 * pci_clear_master - disables bus-mastering for device dev
2843 * @dev: the PCI device to disable
2844 */
2845void pci_clear_master(struct pci_dev *dev)
2846{
2847 __pci_set_master(dev, false);
2848}
b7fe9434 2849EXPORT_SYMBOL(pci_clear_master);
6a479079 2850
1da177e4 2851/**
edb2d97e
MW
2852 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2853 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2854 *
edb2d97e
MW
2855 * Helper function for pci_set_mwi.
2856 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2857 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2858 *
2859 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2860 */
15ea76d4 2861int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2862{
2863 u8 cacheline_size;
2864
2865 if (!pci_cache_line_size)
15ea76d4 2866 return -EINVAL;
1da177e4
LT
2867
2868 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
 2869 equal to or a multiple of the right value. */
2870 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2871 if (cacheline_size >= pci_cache_line_size &&
2872 (cacheline_size % pci_cache_line_size) == 0)
2873 return 0;
2874
2875 /* Write the correct value. */
2876 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2877 /* Read it back. */
2878 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2879 if (cacheline_size == pci_cache_line_size)
2880 return 0;
2881
227f0647
RD
2882 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
2883 pci_cache_line_size << 2);
1da177e4
LT
2884
2885 return -EINVAL;
2886}
15ea76d4
TH
2887EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2888
1da177e4
LT
2889/**
2890 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2891 * @dev: the PCI device for which MWI is enabled
2892 *
694625c0 2893 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2894 *
2895 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2896 */
3c78bc61 2897int pci_set_mwi(struct pci_dev *dev)
1da177e4 2898{
b7fe9434
RD
2899#ifdef PCI_DISABLE_MWI
2900 return 0;
2901#else
1da177e4
LT
2902 int rc;
2903 u16 cmd;
2904
edb2d97e 2905 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2906 if (rc)
2907 return rc;
2908
2909 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3c78bc61 2910 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2911 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2912 cmd |= PCI_COMMAND_INVALIDATE;
2913 pci_write_config_word(dev, PCI_COMMAND, cmd);
2914 }
1da177e4 2915 return 0;
b7fe9434 2916#endif
1da177e4 2917}
b7fe9434 2918EXPORT_SYMBOL(pci_set_mwi);
1da177e4 2919
694625c0
RD
2920/**
2921 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2922 * @dev: the PCI device for which MWI is enabled
2923 *
2924 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2925 * Callers are not required to check the return value.
2926 *
2927 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2928 */
2929int pci_try_set_mwi(struct pci_dev *dev)
2930{
b7fe9434
RD
2931#ifdef PCI_DISABLE_MWI
2932 return 0;
2933#else
2934 return pci_set_mwi(dev);
2935#endif
694625c0 2936}
b7fe9434 2937EXPORT_SYMBOL(pci_try_set_mwi);
694625c0 2938
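/*
 * Illustrative sketch (not part of pci.c): MWI is a pure optimization on
 * conventional PCI, so drivers typically use the _try_ variant and ignore
 * the result, as below.
 */
static void example_enable_mwi(struct pci_dev *pdev)
{
	/* Best effort: harmless if the device or bus lacks MWI support. */
	(void) pci_try_set_mwi(pdev);
}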
1da177e4
LT
2939/**
2940 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2941 * @dev: the PCI device to disable
2942 *
2943 * Disables PCI Memory-Write-Invalidate transaction on the device
2944 */
3c78bc61 2945void pci_clear_mwi(struct pci_dev *dev)
1da177e4 2946{
b7fe9434 2947#ifndef PCI_DISABLE_MWI
1da177e4
LT
2948 u16 cmd;
2949
2950 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2951 if (cmd & PCI_COMMAND_INVALIDATE) {
2952 cmd &= ~PCI_COMMAND_INVALIDATE;
2953 pci_write_config_word(dev, PCI_COMMAND, cmd);
2954 }
b7fe9434 2955#endif
1da177e4 2956}
b7fe9434 2957EXPORT_SYMBOL(pci_clear_mwi);
1da177e4 2958
a04ce0ff
BR
2959/**
2960 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2961 * @pdev: the PCI device to operate on
2962 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2963 *
2964 * Enables/disables PCI INTx for device dev
2965 */
3c78bc61 2966void pci_intx(struct pci_dev *pdev, int enable)
a04ce0ff
BR
2967{
2968 u16 pci_command, new;
2969
2970 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2971
3c78bc61 2972 if (enable)
a04ce0ff 2973 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3c78bc61 2974 else
a04ce0ff 2975 new = pci_command | PCI_COMMAND_INTX_DISABLE;
a04ce0ff
BR
2976
2977 if (new != pci_command) {
9ac7849e
TH
2978 struct pci_devres *dr;
2979
2fd9d74b 2980 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2981
2982 dr = find_pci_dr(pdev);
2983 if (dr && !dr->restore_intx) {
2984 dr->restore_intx = 1;
2985 dr->orig_intx = !enable;
2986 }
a04ce0ff
BR
2987 }
2988}
b7fe9434 2989EXPORT_SYMBOL_GPL(pci_intx);
a04ce0ff 2990
a2e27787
JK
2991/**
2992 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2993 * @dev: the PCI device to operate on
a2e27787
JK
2994 *
 2995 * Check if the device dev supports INTx masking via the config space
2996 * command word.
2997 */
2998bool pci_intx_mask_supported(struct pci_dev *dev)
2999{
3000 bool mask_supported = false;
3001 u16 orig, new;
3002
fbebb9fd
BH
3003 if (dev->broken_intx_masking)
3004 return false;
3005
a2e27787
JK
3006 pci_cfg_access_lock(dev);
3007
3008 pci_read_config_word(dev, PCI_COMMAND, &orig);
3009 pci_write_config_word(dev, PCI_COMMAND,
3010 orig ^ PCI_COMMAND_INTX_DISABLE);
3011 pci_read_config_word(dev, PCI_COMMAND, &new);
3012
3013 /*
3014 * There's no way to protect against hardware bugs or detect them
3015 * reliably, but as long as we know what the value should be, let's
3016 * go ahead and check it.
3017 */
3018 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
227f0647
RD
3019 dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3020 orig, new);
a2e27787
JK
3021 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3022 mask_supported = true;
3023 pci_write_config_word(dev, PCI_COMMAND, orig);
3024 }
3025
3026 pci_cfg_access_unlock(dev);
3027 return mask_supported;
3028}
3029EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3030
3031static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3032{
3033 struct pci_bus *bus = dev->bus;
3034 bool mask_updated = true;
3035 u32 cmd_status_dword;
3036 u16 origcmd, newcmd;
3037 unsigned long flags;
3038 bool irq_pending;
3039
3040 /*
3041 * We do a single dword read to retrieve both command and status.
3042 * Document assumptions that make this possible.
3043 */
3044 BUILD_BUG_ON(PCI_COMMAND % 4);
3045 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3046
3047 raw_spin_lock_irqsave(&pci_lock, flags);
3048
3049 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3050
3051 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3052
3053 /*
3054 * Check interrupt status register to see whether our device
3055 * triggered the interrupt (when masking) or the next IRQ is
3056 * already pending (when unmasking).
3057 */
3058 if (mask != irq_pending) {
3059 mask_updated = false;
3060 goto done;
3061 }
3062
3063 origcmd = cmd_status_dword;
3064 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3065 if (mask)
3066 newcmd |= PCI_COMMAND_INTX_DISABLE;
3067 if (newcmd != origcmd)
3068 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3069
3070done:
3071 raw_spin_unlock_irqrestore(&pci_lock, flags);
3072
3073 return mask_updated;
3074}
3075
3076/**
3077 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 3078 * @dev: the PCI device to operate on
a2e27787
JK
3079 *
3080 * Check if the device dev has its INTx line asserted, mask it and
 3081 * return true in that case. False is returned if no interrupt was
3082 * pending.
3083 */
3084bool pci_check_and_mask_intx(struct pci_dev *dev)
3085{
3086 return pci_check_and_set_intx_mask(dev, true);
3087}
3088EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3089
3090/**
ebd50b93 3091 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3092 * @dev: the PCI device to operate on
a2e27787
JK
3093 *
3094 * Check if the device dev has its INTx line asserted, unmask it if not
3095 * and return true. False is returned and the mask remains active if
3096 * there was still an interrupt pending.
3097 */
3098bool pci_check_and_unmask_intx(struct pci_dev *dev)
3099{
3100 return pci_check_and_set_intx_mask(dev, false);
3101}
3102EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3103
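/*
 * Illustrative sketch (not part of pci.c): a handler for a shared INTx
 * line using the mask helpers above, in the style of drivers that hand
 * the interrupt off to a consumer (e.g. uio/vfio).  The device-private
 * structure is an assumption made for the example.
 */
struct example_intx_dev {
	struct pci_dev *pdev;
};

static irqreturn_t example_intx_handler(int irq, void *data)
{
	struct example_intx_dev *edev = data;

	/* Only claim the IRQ if our device actually asserted it. */
	if (!pci_check_and_mask_intx(edev->pdev))
		return IRQ_NONE;

	/* ... notify the consumer; it re-enables the line later via
	 * pci_check_and_unmask_intx() ... */
	return IRQ_HANDLED;
}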
4d57cdfa
FT
3104int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3105{
3106 return dma_set_max_seg_size(&dev->dev, size);
3107}
3108EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3109
59fc67de
FT
3110int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3111{
3112 return dma_set_seg_boundary(&dev->dev, mask);
3113}
3114EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3115
3775a209
CL
3116/**
3117 * pci_wait_for_pending_transaction - waits for pending transaction
3118 * @dev: the PCI device to operate on
3119 *
 3120 * Return 0 if a transaction is still pending after the timeout, 1 otherwise.
3121 */
3122int pci_wait_for_pending_transaction(struct pci_dev *dev)
8dd7f803 3123{
157e876f
AW
3124 if (!pci_is_pcie(dev))
3125 return 1;
8c1c699f 3126
d0b4cc4e
GS
3127 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3128 PCI_EXP_DEVSTA_TRPND);
3775a209
CL
3129}
3130EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3131
3132static int pcie_flr(struct pci_dev *dev, int probe)
3133{
3134 u32 cap;
3135
3136 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3137 if (!(cap & PCI_EXP_DEVCAP_FLR))
3138 return -ENOTTY;
3139
3140 if (probe)
3141 return 0;
3142
3143 if (!pci_wait_for_pending_transaction(dev))
bb383e28 3144 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
8c1c699f 3145
59875ae4 3146 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
8c1c699f 3147 msleep(100);
8dd7f803
SY
3148 return 0;
3149}
d91cdc74 3150
8c1c699f 3151static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3152{
8c1c699f 3153 int pos;
1ca88797
SY
3154 u8 cap;
3155
8c1c699f
YZ
3156 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3157 if (!pos)
1ca88797 3158 return -ENOTTY;
8c1c699f
YZ
3159
3160 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3161 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3162 return -ENOTTY;
3163
3164 if (probe)
3165 return 0;
3166
d066c946
AW
3167 /*
3168 * Wait for Transaction Pending bit to clear. A word-aligned test
 3169 * is used, so we use the control offset rather than status and shift
3170 * the test bit to match.
3171 */
bb383e28 3172 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
d066c946 3173 PCI_AF_STATUS_TP << 8))
bb383e28 3174 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
5fe5db05 3175
8c1c699f 3176 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3177 msleep(100);
1ca88797
SY
3178 return 0;
3179}
3180
83d74e03
RW
3181/**
3182 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3183 * @dev: Device to reset.
3184 * @probe: If set, only check if the device can be reset this way.
3185 *
3186 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3187 * unset, it will be reinitialized internally when going from PCI_D3hot to
3188 * PCI_D0. If that's the case and the device is not in a low-power state
3189 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3190 *
3191 * NOTE: This causes the caller to sleep for twice the device power transition
3192 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
f7625980 3193 * by default (i.e. unless the @dev's d3_delay field has a different value).
83d74e03
RW
3194 * Moreover, only devices in D0 can be reset by this function.
3195 */
f85876ba 3196static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3197{
f85876ba
YZ
3198 u16 csr;
3199
51e53738 3200 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
f85876ba 3201 return -ENOTTY;
d91cdc74 3202
f85876ba
YZ
3203 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3204 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3205 return -ENOTTY;
d91cdc74 3206
f85876ba
YZ
3207 if (probe)
3208 return 0;
1ca88797 3209
f85876ba
YZ
3210 if (dev->current_state != PCI_D0)
3211 return -EINVAL;
3212
3213 csr &= ~PCI_PM_CTRL_STATE_MASK;
3214 csr |= PCI_D3hot;
3215 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3216 pci_dev_d3_sleep(dev);
f85876ba
YZ
3217
3218 csr &= ~PCI_PM_CTRL_STATE_MASK;
3219 csr |= PCI_D0;
3220 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3221 pci_dev_d3_sleep(dev);
f85876ba
YZ
3222
3223 return 0;
3224}
3225
9e33002f 3226void pci_reset_secondary_bus(struct pci_dev *dev)
c12ff1df
YZ
3227{
3228 u16 ctrl;
64e8674f
AW
3229
3230 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3231 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3232 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3233 /*
3234 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
f7625980 3235 * this to 2ms to ensure that we meet the minimum requirement.
de0c548c
AW
3236 */
3237 msleep(2);
64e8674f
AW
3238
3239 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3240 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
3241
3242 /*
3243 * Trhfa for conventional PCI is 2^25 clock cycles.
3244 * Assuming a minimum 33MHz clock this results in a 1s
3245 * delay before we can consider subordinate devices to
3246 * be re-initialized. PCIe has some ways to shorten this,
3247 * but we don't make use of them yet.
3248 */
3249 ssleep(1);
64e8674f 3250}
d92a208d 3251
9e33002f
GS
3252void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
3253{
3254 pci_reset_secondary_bus(dev);
3255}
3256
d92a208d
GS
3257/**
3258 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3259 * @dev: Bridge device
3260 *
3261 * Use the bridge control register to assert reset on the secondary bus.
3262 * Devices on the secondary bus are left in power-on state.
3263 */
3264void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3265{
3266 pcibios_reset_secondary_bus(dev);
3267}
64e8674f
AW
3268EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3269
3270static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3271{
c12ff1df
YZ
3272 struct pci_dev *pdev;
3273
f331a859
AW
3274 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3275 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
c12ff1df
YZ
3276 return -ENOTTY;
3277
3278 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3279 if (pdev != dev)
3280 return -ENOTTY;
3281
3282 if (probe)
3283 return 0;
3284
64e8674f 3285 pci_reset_bridge_secondary_bus(dev->bus->self);
c12ff1df
YZ
3286
3287 return 0;
3288}
3289
608c3881
AW
3290static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3291{
3292 int rc = -ENOTTY;
3293
3294 if (!hotplug || !try_module_get(hotplug->ops->owner))
3295 return rc;
3296
3297 if (hotplug->ops->reset_slot)
3298 rc = hotplug->ops->reset_slot(hotplug, probe);
3299
3300 module_put(hotplug->ops->owner);
3301
3302 return rc;
3303}
3304
3305static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3306{
3307 struct pci_dev *pdev;
3308
f331a859
AW
3309 if (dev->subordinate || !dev->slot ||
3310 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
608c3881
AW
3311 return -ENOTTY;
3312
3313 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3314 if (pdev != dev && pdev->slot == dev->slot)
3315 return -ENOTTY;
3316
3317 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3318}
3319
977f857c 3320static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3321{
8c1c699f
YZ
3322 int rc;
3323
3324 might_sleep();
3325
b9c3b266
DC
3326 rc = pci_dev_specific_reset(dev, probe);
3327 if (rc != -ENOTTY)
3328 goto done;
3329
8c1c699f
YZ
3330 rc = pcie_flr(dev, probe);
3331 if (rc != -ENOTTY)
3332 goto done;
d91cdc74 3333
8c1c699f 3334 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3335 if (rc != -ENOTTY)
3336 goto done;
3337
3338 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3339 if (rc != -ENOTTY)
3340 goto done;
3341
608c3881
AW
3342 rc = pci_dev_reset_slot_function(dev, probe);
3343 if (rc != -ENOTTY)
3344 goto done;
3345
c12ff1df 3346 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3347done:
977f857c
KRW
3348 return rc;
3349}
3350
77cb985a
AW
3351static void pci_dev_lock(struct pci_dev *dev)
3352{
3353 pci_cfg_access_lock(dev);
3354 /* block PM suspend, driver probe, etc. */
3355 device_lock(&dev->dev);
3356}
3357
61cf16d8
AW
3358/* Return 1 on successful lock, 0 on contention */
3359static int pci_dev_trylock(struct pci_dev *dev)
3360{
3361 if (pci_cfg_access_trylock(dev)) {
3362 if (device_trylock(&dev->dev))
3363 return 1;
3364 pci_cfg_access_unlock(dev);
3365 }
3366
3367 return 0;
3368}
3369
77cb985a
AW
3370static void pci_dev_unlock(struct pci_dev *dev)
3371{
3372 device_unlock(&dev->dev);
3373 pci_cfg_access_unlock(dev);
3374}
3375
3ebe7f9f
KB
3376/**
3377 * pci_reset_notify - notify device driver of reset
3378 * @dev: device to be notified of reset
3379 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
3380 * completed
3381 *
3382 * Must be called prior to device access being disabled and after device
3383 * access is restored.
3384 */
3385static void pci_reset_notify(struct pci_dev *dev, bool prepare)
3386{
3387 const struct pci_error_handlers *err_handler =
3388 dev->driver ? dev->driver->err_handler : NULL;
3389 if (err_handler && err_handler->reset_notify)
3390 err_handler->reset_notify(dev, prepare);
3391}
3392
77cb985a
AW
3393static void pci_dev_save_and_disable(struct pci_dev *dev)
3394{
3ebe7f9f
KB
3395 pci_reset_notify(dev, true);
3396
a6cbaade
AW
3397 /*
3398 * Wake-up device prior to save. PM registers default to D0 after
3399 * reset and a simple register restore doesn't reliably return
3400 * to a non-D0 state anyway.
3401 */
3402 pci_set_power_state(dev, PCI_D0);
3403
77cb985a
AW
3404 pci_save_state(dev);
3405 /*
3406 * Disable the device by clearing the Command register, except for
3407 * INTx-disable which is set. This not only disables MMIO and I/O port
3408 * BARs, but also prevents the device from being Bus Master, preventing
3409 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
3410 * compliant devices, INTx-disable prevents legacy interrupts.
3411 */
3412 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3413}
3414
3415static void pci_dev_restore(struct pci_dev *dev)
3416{
3417 pci_restore_state(dev);
3ebe7f9f 3418 pci_reset_notify(dev, false);
77cb985a
AW
3419}
3420
977f857c
KRW
3421static int pci_dev_reset(struct pci_dev *dev, int probe)
3422{
3423 int rc;
3424
77cb985a
AW
3425 if (!probe)
3426 pci_dev_lock(dev);
977f857c
KRW
3427
3428 rc = __pci_dev_reset(dev, probe);
3429
77cb985a
AW
3430 if (!probe)
3431 pci_dev_unlock(dev);
3432
8c1c699f 3433 return rc;
d91cdc74 3434}
3ebe7f9f 3435
d91cdc74 3436/**
8c1c699f
YZ
3437 * __pci_reset_function - reset a PCI device function
3438 * @dev: PCI device to reset
d91cdc74
SY
3439 *
3440 * Some devices allow an individual function to be reset without affecting
3441 * other functions in the same device. The PCI device must be responsive
3442 * to PCI config space in order to use this function.
3443 *
3444 * The device function is presumed to be unused when this function is called.
3445 * Resetting the device will make the contents of PCI configuration space
3446 * random, so any caller of this must be prepared to reinitialise the
3447 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3448 * etc.
3449 *
8c1c699f 3450 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3451 * device doesn't support resetting a single function.
3452 */
8c1c699f 3453int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3454{
8c1c699f 3455 return pci_dev_reset(dev, 0);
d91cdc74 3456}
8c1c699f 3457EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3458
6fbf9e7a
KRW
3459/**
3460 * __pci_reset_function_locked - reset a PCI device function while holding
3461 * the @dev mutex lock.
3462 * @dev: PCI device to reset
3463 *
3464 * Some devices allow an individual function to be reset without affecting
3465 * other functions in the same device. The PCI device must be responsive
3466 * to PCI config space in order to use this function.
3467 *
3468 * The device function is presumed to be unused and the caller is holding
3469 * the device mutex lock when this function is called.
3470 * Resetting the device will make the contents of PCI configuration space
3471 * random, so any caller of this must be prepared to reinitialise the
3472 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3473 * etc.
3474 *
3475 * Returns 0 if the device function was successfully reset or negative if the
3476 * device doesn't support resetting a single function.
3477 */
3478int __pci_reset_function_locked(struct pci_dev *dev)
3479{
977f857c 3480 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3481}
3482EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3483
711d5779
MT
3484/**
3485 * pci_probe_reset_function - check whether the device can be safely reset
3486 * @dev: PCI device to reset
3487 *
3488 * Some devices allow an individual function to be reset without affecting
3489 * other functions in the same device. The PCI device must be responsive
3490 * to PCI config space in order to use this function.
3491 *
3492 * Returns 0 if the device function can be reset or negative if the
3493 * device doesn't support resetting a single function.
3494 */
3495int pci_probe_reset_function(struct pci_dev *dev)
3496{
3497 return pci_dev_reset(dev, 1);
3498}
3499
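/*
 * Illustrative sketch (not part of the original file): gate an optional
 * feature on whether a function-level reset is available at all. The
 * helper name is hypothetical.
 */
static bool example_can_reset_function(struct pci_dev *pdev)
{
	return pci_probe_reset_function(pdev) == 0;
}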
8dd7f803 3500/**
8c1c699f
YZ
3501 * pci_reset_function - quiesce and reset a PCI device function
3502 * @dev: PCI device to reset
8dd7f803
SY
3503 *
3504 * Some devices allow an individual function to be reset without affecting
3505 * other functions in the same device. The PCI device must be responsive
3506 * to PCI config space in order to use this function.
3507 *
3508 * This function does not just reset the PCI portion of a device, but
3509 * clears all the state associated with the device. This function differs
8c1c699f 3510 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3511 * over the reset.
3512 *
8c1c699f 3513 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3514 * device doesn't support resetting a single function.
3515 */
3516int pci_reset_function(struct pci_dev *dev)
3517{
8c1c699f 3518 int rc;
8dd7f803 3519
8c1c699f
YZ
3520 rc = pci_dev_reset(dev, 1);
3521 if (rc)
3522 return rc;
8dd7f803 3523
77cb985a 3524 pci_dev_save_and_disable(dev);
8dd7f803 3525
8c1c699f 3526 rc = pci_dev_reset(dev, 0);
8dd7f803 3527
77cb985a 3528 pci_dev_restore(dev);
8dd7f803 3529
8c1c699f 3530 return rc;
8dd7f803
SY
3531}
3532EXPORT_SYMBOL_GPL(pci_reset_function);
3533
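/*
 * Illustrative sketch (not part of the original file): a driver resetting
 * its own function. pci_reset_function() saves and restores config space
 * around the reset, but driver-private device state must be reprogrammed
 * by the caller afterwards. The helper name is hypothetical.
 */
static int example_driver_reset(struct pci_dev *pdev)
{
	int rc;

	rc = pci_reset_function(pdev);
	if (rc)
		return rc;

	/* Config space is restored; reprogram device-specific state here. */
	return 0;
}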
61cf16d8
AW
3534/**
3535 * pci_try_reset_function - quiesce and reset a PCI device function
3536 * @dev: PCI device to reset
3537 *
 3538 * Same as above, except return -EAGAIN if unable to lock the device.
3539 */
3540int pci_try_reset_function(struct pci_dev *dev)
3541{
3542 int rc;
3543
3544 rc = pci_dev_reset(dev, 1);
3545 if (rc)
3546 return rc;
3547
3548 pci_dev_save_and_disable(dev);
3549
3550 if (pci_dev_trylock(dev)) {
3551 rc = __pci_dev_reset(dev, 0);
3552 pci_dev_unlock(dev);
3553 } else
3554 rc = -EAGAIN;
3555
3556 pci_dev_restore(dev);
3557
3558 return rc;
3559}
3560EXPORT_SYMBOL_GPL(pci_try_reset_function);
3561
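/*
 * Illustrative sketch (not part of the original file): the _try_ variant
 * suits paths that must not block on a contended device lock; on -EAGAIN
 * the caller can simply retry later. The helper name is hypothetical.
 */
static int example_try_reset(struct pci_dev *pdev)
{
	int rc = pci_try_reset_function(pdev);

	if (rc == -EAGAIN)
		dev_info(&pdev->dev, "device busy, deferring reset\n");
	return rc;
}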
f331a859
AW
3562/* Do any devices on or below this bus prevent a bus reset? */
3563static bool pci_bus_resetable(struct pci_bus *bus)
3564{
3565 struct pci_dev *dev;
3566
3567 list_for_each_entry(dev, &bus->devices, bus_list) {
3568 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3569 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3570 return false;
3571 }
3572
3573 return true;
3574}
3575
090a3c53
AW
3576/* Lock devices from the top of the tree down */
3577static void pci_bus_lock(struct pci_bus *bus)
3578{
3579 struct pci_dev *dev;
3580
3581 list_for_each_entry(dev, &bus->devices, bus_list) {
3582 pci_dev_lock(dev);
3583 if (dev->subordinate)
3584 pci_bus_lock(dev->subordinate);
3585 }
3586}
3587
3588/* Unlock devices from the bottom of the tree up */
3589static void pci_bus_unlock(struct pci_bus *bus)
3590{
3591 struct pci_dev *dev;
3592
3593 list_for_each_entry(dev, &bus->devices, bus_list) {
3594 if (dev->subordinate)
3595 pci_bus_unlock(dev->subordinate);
3596 pci_dev_unlock(dev);
3597 }
3598}
3599
61cf16d8
AW
3600/* Return 1 on successful lock, 0 on contention */
3601static int pci_bus_trylock(struct pci_bus *bus)
3602{
3603 struct pci_dev *dev;
3604
3605 list_for_each_entry(dev, &bus->devices, bus_list) {
3606 if (!pci_dev_trylock(dev))
3607 goto unlock;
3608 if (dev->subordinate) {
3609 if (!pci_bus_trylock(dev->subordinate)) {
3610 pci_dev_unlock(dev);
3611 goto unlock;
3612 }
3613 }
3614 }
3615 return 1;
3616
3617unlock:
3618 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
3619 if (dev->subordinate)
3620 pci_bus_unlock(dev->subordinate);
3621 pci_dev_unlock(dev);
3622 }
3623 return 0;
3624}
3625
f331a859
AW
3626/* Do any devices on or below this slot prevent a bus reset? */
3627static bool pci_slot_resetable(struct pci_slot *slot)
3628{
3629 struct pci_dev *dev;
3630
3631 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3632 if (!dev->slot || dev->slot != slot)
3633 continue;
3634 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3635 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3636 return false;
3637 }
3638
3639 return true;
3640}
3641
090a3c53
AW
3642/* Lock devices from the top of the tree down */
3643static void pci_slot_lock(struct pci_slot *slot)
3644{
3645 struct pci_dev *dev;
3646
3647 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3648 if (!dev->slot || dev->slot != slot)
3649 continue;
3650 pci_dev_lock(dev);
3651 if (dev->subordinate)
3652 pci_bus_lock(dev->subordinate);
3653 }
3654}
3655
3656/* Unlock devices from the bottom of the tree up */
3657static void pci_slot_unlock(struct pci_slot *slot)
3658{
3659 struct pci_dev *dev;
3660
3661 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3662 if (!dev->slot || dev->slot != slot)
3663 continue;
3664 if (dev->subordinate)
3665 pci_bus_unlock(dev->subordinate);
3666 pci_dev_unlock(dev);
3667 }
3668}
3669
61cf16d8
AW
3670/* Return 1 on successful lock, 0 on contention */
3671static int pci_slot_trylock(struct pci_slot *slot)
3672{
3673 struct pci_dev *dev;
3674
3675 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3676 if (!dev->slot || dev->slot != slot)
3677 continue;
3678 if (!pci_dev_trylock(dev))
3679 goto unlock;
3680 if (dev->subordinate) {
3681 if (!pci_bus_trylock(dev->subordinate)) {
3682 pci_dev_unlock(dev);
3683 goto unlock;
3684 }
3685 }
3686 }
3687 return 1;
3688
3689unlock:
3690 list_for_each_entry_continue_reverse(dev,
3691 &slot->bus->devices, bus_list) {
3692 if (!dev->slot || dev->slot != slot)
3693 continue;
3694 if (dev->subordinate)
3695 pci_bus_unlock(dev->subordinate);
3696 pci_dev_unlock(dev);
3697 }
3698 return 0;
3699}
3700
090a3c53
AW
3701/* Save and disable devices from the top of the tree down */
3702static void pci_bus_save_and_disable(struct pci_bus *bus)
3703{
3704 struct pci_dev *dev;
3705
3706 list_for_each_entry(dev, &bus->devices, bus_list) {
3707 pci_dev_save_and_disable(dev);
3708 if (dev->subordinate)
3709 pci_bus_save_and_disable(dev->subordinate);
3710 }
3711}
3712
3713/*
3714 * Restore devices from top of the tree down - parent bridges need to be
3715 * restored before we can get to subordinate devices.
3716 */
3717static void pci_bus_restore(struct pci_bus *bus)
3718{
3719 struct pci_dev *dev;
3720
3721 list_for_each_entry(dev, &bus->devices, bus_list) {
3722 pci_dev_restore(dev);
3723 if (dev->subordinate)
3724 pci_bus_restore(dev->subordinate);
3725 }
3726}
3727
3728/* Save and disable devices from the top of the tree down */
3729static void pci_slot_save_and_disable(struct pci_slot *slot)
3730{
3731 struct pci_dev *dev;
3732
3733 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3734 if (!dev->slot || dev->slot != slot)
3735 continue;
3736 pci_dev_save_and_disable(dev);
3737 if (dev->subordinate)
3738 pci_bus_save_and_disable(dev->subordinate);
3739 }
3740}
3741
3742/*
3743 * Restore devices from top of the tree down - parent bridges need to be
3744 * restored before we can get to subordinate devices.
3745 */
3746static void pci_slot_restore(struct pci_slot *slot)
3747{
3748 struct pci_dev *dev;
3749
3750 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3751 if (!dev->slot || dev->slot != slot)
3752 continue;
3753 pci_dev_restore(dev);
3754 if (dev->subordinate)
3755 pci_bus_restore(dev->subordinate);
3756 }
3757}
3758
3759static int pci_slot_reset(struct pci_slot *slot, int probe)
3760{
3761 int rc;
3762
f331a859 3763 if (!slot || !pci_slot_resetable(slot))
090a3c53
AW
3764 return -ENOTTY;
3765
3766 if (!probe)
3767 pci_slot_lock(slot);
3768
3769 might_sleep();
3770
3771 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
3772
3773 if (!probe)
3774 pci_slot_unlock(slot);
3775
3776 return rc;
3777}
3778
9a3d2b9b
AW
3779/**
3780 * pci_probe_reset_slot - probe whether a PCI slot can be reset
3781 * @slot: PCI slot to probe
3782 *
3783 * Return 0 if slot can be reset, negative if a slot reset is not supported.
3784 */
3785int pci_probe_reset_slot(struct pci_slot *slot)
3786{
3787 return pci_slot_reset(slot, 1);
3788}
3789EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
3790
090a3c53
AW
3791/**
3792 * pci_reset_slot - reset a PCI slot
3793 * @slot: PCI slot to reset
3794 *
 3795 * A PCI bus may host multiple slots, and each slot may support a reset
 3796 * mechanism independent of other slots. For instance, some slots may support
 3797 * slot power control. In the case of a 1:1 bus-to-slot architecture, this
 3798 * function may wrap the bus reset to avoid spurious slot-related events such
 3799 * as hotplug. Generally a slot reset should be attempted before a bus reset.
 3800 * All of the functions of the slot and any subordinate buses behind the slot
 3801 * are reset through this function. PCI config space of all devices in the
 3802 * slot and behind the slot is saved before and restored after reset.
3803 *
3804 * Return 0 on success, non-zero on error.
3805 */
3806int pci_reset_slot(struct pci_slot *slot)
3807{
3808 int rc;
3809
3810 rc = pci_slot_reset(slot, 1);
3811 if (rc)
3812 return rc;
3813
3814 pci_slot_save_and_disable(slot);
3815
3816 rc = pci_slot_reset(slot, 0);
3817
3818 pci_slot_restore(slot);
3819
3820 return rc;
3821}
3822EXPORT_SYMBOL_GPL(pci_reset_slot);
3823
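/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, attempt a slot reset before falling back to a bus reset.
 * pci_probe_reset_slot() returns -ENOTTY for a NULL slot, so the check is
 * safe for devices without a slot. The helper name is hypothetical.
 */
static int example_reset_slot_or_bus(struct pci_dev *pdev)
{
	if (pci_probe_reset_slot(pdev->slot) == 0)
		return pci_reset_slot(pdev->slot);

	return pci_reset_bus(pdev->bus);
}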
61cf16d8
AW
3824/**
3825 * pci_try_reset_slot - Try to reset a PCI slot
3826 * @slot: PCI slot to reset
3827 *
 3828 * Same as above, except return -EAGAIN if the slot cannot be locked.
3829 */
3830int pci_try_reset_slot(struct pci_slot *slot)
3831{
3832 int rc;
3833
3834 rc = pci_slot_reset(slot, 1);
3835 if (rc)
3836 return rc;
3837
3838 pci_slot_save_and_disable(slot);
3839
3840 if (pci_slot_trylock(slot)) {
3841 might_sleep();
3842 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
3843 pci_slot_unlock(slot);
3844 } else
3845 rc = -EAGAIN;
3846
3847 pci_slot_restore(slot);
3848
3849 return rc;
3850}
3851EXPORT_SYMBOL_GPL(pci_try_reset_slot);
3852
090a3c53
AW
3853static int pci_bus_reset(struct pci_bus *bus, int probe)
3854{
f331a859 3855 if (!bus->self || !pci_bus_resetable(bus))
090a3c53
AW
3856 return -ENOTTY;
3857
3858 if (probe)
3859 return 0;
3860
3861 pci_bus_lock(bus);
3862
3863 might_sleep();
3864
3865 pci_reset_bridge_secondary_bus(bus->self);
3866
3867 pci_bus_unlock(bus);
3868
3869 return 0;
3870}
3871
9a3d2b9b
AW
3872/**
3873 * pci_probe_reset_bus - probe whether a PCI bus can be reset
3874 * @bus: PCI bus to probe
3875 *
3876 * Return 0 if bus can be reset, negative if a bus reset is not supported.
3877 */
3878int pci_probe_reset_bus(struct pci_bus *bus)
3879{
3880 return pci_bus_reset(bus, 1);
3881}
3882EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
3883
090a3c53
AW
3884/**
3885 * pci_reset_bus - reset a PCI bus
3886 * @bus: top level PCI bus to reset
3887 *
3888 * Do a bus reset on the given bus and any subordinate buses, saving
3889 * and restoring state of all devices.
3890 *
3891 * Return 0 on success, non-zero on error.
3892 */
3893int pci_reset_bus(struct pci_bus *bus)
3894{
3895 int rc;
3896
3897 rc = pci_bus_reset(bus, 1);
3898 if (rc)
3899 return rc;
3900
3901 pci_bus_save_and_disable(bus);
3902
3903 rc = pci_bus_reset(bus, 0);
3904
3905 pci_bus_restore(bus);
3906
3907 return rc;
3908}
3909EXPORT_SYMBOL_GPL(pci_reset_bus);
3910
61cf16d8
AW
3911/**
3912 * pci_try_reset_bus - Try to reset a PCI bus
3913 * @bus: top level PCI bus to reset
3914 *
 3915 * Same as above, except return -EAGAIN if the bus cannot be locked.
3916 */
3917int pci_try_reset_bus(struct pci_bus *bus)
3918{
3919 int rc;
3920
3921 rc = pci_bus_reset(bus, 1);
3922 if (rc)
3923 return rc;
3924
3925 pci_bus_save_and_disable(bus);
3926
3927 if (pci_bus_trylock(bus)) {
3928 might_sleep();
3929 pci_reset_bridge_secondary_bus(bus->self);
3930 pci_bus_unlock(bus);
3931 } else
3932 rc = -EAGAIN;
3933
3934 pci_bus_restore(bus);
3935
3936 return rc;
3937}
3938EXPORT_SYMBOL_GPL(pci_try_reset_bus);
3939
d556ad4b
PO
3940/**
3941 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3942 * @dev: PCI device to query
3943 *
3944 * Returns mmrbc: maximum designed memory read count in bytes
 3945 * or an appropriate error value.
3946 */
3947int pcix_get_max_mmrbc(struct pci_dev *dev)
3948{
7c9e2b1c 3949 int cap;
d556ad4b
PO
3950 u32 stat;
3951
3952 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3953 if (!cap)
3954 return -EINVAL;
3955
7c9e2b1c 3956 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3957 return -EINVAL;
3958
25daeb55 3959 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3960}
3961EXPORT_SYMBOL(pcix_get_max_mmrbc);
3962
3963/**
3964 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3965 * @dev: PCI device to query
3966 *
3967 * Returns mmrbc: maximum memory read count in bytes
 3968 * or an appropriate error value.
3969 */
3970int pcix_get_mmrbc(struct pci_dev *dev)
3971{
7c9e2b1c 3972 int cap;
bdc2bda7 3973 u16 cmd;
d556ad4b
PO
3974
3975 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3976 if (!cap)
3977 return -EINVAL;
3978
7c9e2b1c
DN
3979 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3980 return -EINVAL;
d556ad4b 3981
7c9e2b1c 3982 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3983}
3984EXPORT_SYMBOL(pcix_get_mmrbc);
3985
3986/**
3987 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3988 * @dev: PCI device to query
3989 * @mmrbc: maximum memory read count in bytes
3990 * valid values are 512, 1024, 2048, 4096
3991 *
 3992 * If possible, sets the maximum memory read byte count; some bridges have
 3993 * errata that prevent this.
3994 */
3995int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3996{
7c9e2b1c 3997 int cap;
bdc2bda7
DN
3998 u32 stat, v, o;
3999 u16 cmd;
d556ad4b 4000
229f5afd 4001 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 4002 return -EINVAL;
d556ad4b
PO
4003
4004 v = ffs(mmrbc) - 10;
4005
4006 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4007 if (!cap)
7c9e2b1c 4008 return -EINVAL;
d556ad4b 4009
7c9e2b1c
DN
4010 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4011 return -EINVAL;
d556ad4b
PO
4012
4013 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4014 return -E2BIG;
4015
7c9e2b1c
DN
4016 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4017 return -EINVAL;
d556ad4b
PO
4018
4019 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4020 if (o != v) {
809a3bf9 4021 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
d556ad4b
PO
4022 return -EIO;
4023
4024 cmd &= ~PCI_X_CMD_MAX_READ;
4025 cmd |= v << 2;
7c9e2b1c
DN
4026 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4027 return -EIO;
d556ad4b 4028 }
7c9e2b1c 4029 return 0;
d556ad4b
PO
4030}
4031EXPORT_SYMBOL(pcix_set_mmrbc);
4032
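/*
 * Illustrative sketch (not part of the original file): clamp a requested
 * MMRBC to the device's designed maximum before applying it. The helper
 * name is hypothetical.
 */
static int example_tune_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int max = pcix_get_max_mmrbc(dev);

	if (max < 0)
		return max;
	if (mmrbc > max)
		mmrbc = max;

	return pcix_set_mmrbc(dev, mmrbc);
}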
4033/**
4034 * pcie_get_readrq - get PCI Express read request size
4035 * @dev: PCI device to query
4036 *
4037 * Returns maximum memory read request in bytes
 4038 * or an appropriate error value.
4039 */
4040int pcie_get_readrq(struct pci_dev *dev)
4041{
d556ad4b
PO
4042 u16 ctl;
4043
59875ae4 4044 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
d556ad4b 4045
59875ae4 4046 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
4047}
4048EXPORT_SYMBOL(pcie_get_readrq);
4049
4050/**
4051 * pcie_set_readrq - set PCI Express maximum memory read request
 4052 * @dev: PCI device to set
42e61f4a 4053 * @rq: maximum memory read count in bytes
d556ad4b
PO
4054 * valid values are 128, 256, 512, 1024, 2048, 4096
4055 *
c9b378c7 4056 * If possible, sets the maximum memory read request in bytes.
d556ad4b
PO
4057 */
4058int pcie_set_readrq(struct pci_dev *dev, int rq)
4059{
59875ae4 4060 u16 v;
d556ad4b 4061
229f5afd 4062 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
59875ae4 4063 return -EINVAL;
d556ad4b 4064
a1c473aa
BH
4065 /*
4066 * If using the "performance" PCIe config, we clamp the
4067 * read rq size to the max packet size to prevent the
4068 * host bridge generating requests larger than we can
4069 * cope with
4070 */
4071 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4072 int mps = pcie_get_mps(dev);
4073
a1c473aa
BH
4074 if (mps < rq)
4075 rq = mps;
4076 }
4077
4078 v = (ffs(rq) - 8) << 12;
d556ad4b 4079
59875ae4
JL
4080 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4081 PCI_EXP_DEVCTL_READRQ, v);
d556ad4b
PO
4082}
4083EXPORT_SYMBOL(pcie_set_readrq);
4084
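/*
 * Illustrative sketch (not part of the original file): request the largest
 * MRRS and report the value actually in effect if the write fails. The
 * helper name is hypothetical.
 */
static void example_bump_readrq(struct pci_dev *dev)
{
	if (pcie_set_readrq(dev, 4096))
		dev_info(&dev->dev, "MRRS left at %d bytes\n",
			 pcie_get_readrq(dev));
}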
b03e7495
JM
4085/**
4086 * pcie_get_mps - get PCI Express maximum payload size
4087 * @dev: PCI device to query
4088 *
4089 * Returns maximum payload size in bytes
b03e7495
JM
4090 */
4091int pcie_get_mps(struct pci_dev *dev)
4092{
b03e7495
JM
4093 u16 ctl;
4094
59875ae4 4095 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
b03e7495 4096
59875ae4 4097 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
b03e7495 4098}
f1c66c46 4099EXPORT_SYMBOL(pcie_get_mps);
b03e7495
JM
4100
4101/**
4102 * pcie_set_mps - set PCI Express maximum payload size
 4103 * @dev: PCI device to set
47c08f31 4104 * @mps: maximum payload size in bytes
b03e7495
JM
4105 * valid values are 128, 256, 512, 1024, 2048, 4096
4106 *
 4107 * If possible, sets the maximum payload size.
4108 */
4109int pcie_set_mps(struct pci_dev *dev, int mps)
4110{
59875ae4 4111 u16 v;
b03e7495
JM
4112
4113 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
59875ae4 4114 return -EINVAL;
b03e7495
JM
4115
4116 v = ffs(mps) - 8;
f7625980 4117 if (v > dev->pcie_mpss)
59875ae4 4118 return -EINVAL;
b03e7495
JM
4119 v <<= 5;
4120
59875ae4
JL
4121 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4122 PCI_EXP_DEVCTL_PAYLOAD, v);
b03e7495 4123}
f1c66c46 4124EXPORT_SYMBOL(pcie_set_mps);
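/*
 * Illustrative sketch (not part of the original file): dev->pcie_mpss
 * encodes the function's supported maximum as 128 << pcie_mpss, so this
 * programs the largest payload the function itself supports; the fabric
 * configuration (pcie_bus_config) may still require a smaller value. The
 * helper name is hypothetical.
 */
static int example_set_supported_mps(struct pci_dev *dev)
{
	return pcie_set_mps(dev, 128 << dev->pcie_mpss);
}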
b03e7495 4125
81377c8d
JK
4126/**
4127 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4128 * @dev: PCI device to query
4129 * @speed: storage for minimum speed
4130 * @width: storage for minimum width
4131 *
4132 * This function will walk up the PCI device chain and determine the minimum
4133 * link width and speed of the device.
4134 */
4135int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4136 enum pcie_link_width *width)
4137{
4138 int ret;
4139
4140 *speed = PCI_SPEED_UNKNOWN;
4141 *width = PCIE_LNK_WIDTH_UNKNOWN;
4142
4143 while (dev) {
4144 u16 lnksta;
4145 enum pci_bus_speed next_speed;
4146 enum pcie_link_width next_width;
4147
4148 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4149 if (ret)
4150 return ret;
4151
4152 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4153 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4154 PCI_EXP_LNKSTA_NLW_SHIFT;
4155
4156 if (next_speed < *speed)
4157 *speed = next_speed;
4158
4159 if (next_width < *width)
4160 *width = next_width;
4161
4162 dev = dev->bus->self;
4163 }
4164
4165 return 0;
4166}
4167EXPORT_SYMBOL(pcie_get_minimum_link);
4168
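/*
 * Illustrative sketch (not part of the original file): report the
 * narrowest point on the path to the root complex. Note that @speed is an
 * enum pci_bus_speed value, not a raw transfer rate. The helper name is
 * hypothetical.
 */
static void example_report_min_link(struct pci_dev *dev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(dev, &speed, &width) == 0)
		dev_info(&dev->dev, "minimum link: x%d, speed enum %d\n",
			 width, speed);
}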
c87deff7
HS
4169/**
4170 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 4171 * @dev: the PCI device for which the BAR mask is made
c87deff7
HS
4172 * @flags: resource type mask to be selected
4173 *
 4174 * This helper routine makes a BAR mask from the given resource type.
4175 */
4176int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4177{
4178 int i, bars = 0;
4179 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4180 if (pci_resource_flags(dev, i) & flags)
4181 bars |= (1 << i);
4182 return bars;
4183}
b7fe9434 4184EXPORT_SYMBOL(pci_select_bars);
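/*
 * Illustrative sketch (not part of the original file): a common use is
 * requesting only a device's memory BARs in one call. The helper and
 * region name are hypothetical.
 */
static int example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}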
c87deff7 4185
613e7ed6
YZ
4186/**
4187 * pci_resource_bar - get position of the BAR associated with a resource
4188 * @dev: the PCI device
4189 * @resno: the resource number
4190 * @type: the BAR type to be filled in
4191 *
4192 * Returns BAR position in config space, or 0 if the BAR is invalid.
4193 */
4194int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4195{
d1b054da
YZ
4196 int reg;
4197
613e7ed6
YZ
4198 if (resno < PCI_ROM_RESOURCE) {
4199 *type = pci_bar_unknown;
4200 return PCI_BASE_ADDRESS_0 + 4 * resno;
4201 } else if (resno == PCI_ROM_RESOURCE) {
4202 *type = pci_bar_mem32;
4203 return dev->rom_base_reg;
d1b054da
YZ
4204 } else if (resno < PCI_BRIDGE_RESOURCES) {
4205 /* device specific resource */
26ff46c6
MS
4206 *type = pci_bar_unknown;
4207 reg = pci_iov_resource_bar(dev, resno);
d1b054da
YZ
4208 if (reg)
4209 return reg;
613e7ed6
YZ
4210 }
4211
865df576 4212 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
4213 return 0;
4214}
4215
95a8b6ef
MT
4216/* Some architectures require additional programming to enable VGA */
4217static arch_set_vga_state_t arch_set_vga_state;
4218
4219void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4220{
4221 arch_set_vga_state = func; /* NULL disables */
4222}
4223
4224static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3c78bc61 4225 unsigned int command_bits, u32 flags)
95a8b6ef
MT
4226{
4227 if (arch_set_vga_state)
4228 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 4229 flags);
95a8b6ef
MT
4230 return 0;
4231}
4232
deb2d2ec
BH
4233/**
4234 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
4235 * @dev: the PCI device
4236 * @decode: true = enable decoding, false = disable decoding
4237 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 4238 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
3448a19d 4239 * when CHANGE_BRIDGE is set, traverse ancestors and change bridges
deb2d2ec
BH
4240 */
4241int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 4242 unsigned int command_bits, u32 flags)
deb2d2ec
BH
4243{
4244 struct pci_bus *bus;
4245 struct pci_dev *bridge;
4246 u16 cmd;
95a8b6ef 4247 int rc;
deb2d2ec 4248
67ebd814 4249 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 4250
95a8b6ef 4251 /* ARCH specific VGA enables */
3448a19d 4252 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
4253 if (rc)
4254 return rc;
4255
3448a19d
DA
4256 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4257 pci_read_config_word(dev, PCI_COMMAND, &cmd);
 4258 if (decode)
4259 cmd |= command_bits;
4260 else
4261 cmd &= ~command_bits;
4262 pci_write_config_word(dev, PCI_COMMAND, cmd);
4263 }
deb2d2ec 4264
3448a19d 4265 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
4266 return 0;
4267
4268 bus = dev->bus;
4269 while (bus) {
4270 bridge = bus->self;
4271 if (bridge) {
4272 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4273 &cmd);
 4274 if (decode)
4275 cmd |= PCI_BRIDGE_CTL_VGA;
4276 else
4277 cmd &= ~PCI_BRIDGE_CTL_VGA;
4278 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4279 cmd);
4280 }
4281 bus = bus->parent;
4282 }
4283 return 0;
4284}
4285
8496e85c
RW
4286bool pci_device_is_present(struct pci_dev *pdev)
4287{
4288 u32 v;
4289
4290 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4291}
4292EXPORT_SYMBOL_GPL(pci_device_is_present);
4293
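/*
 * Illustrative sketch (not part of the original file): guard a restore
 * path against surprise removal. The helper name is hypothetical.
 */
static void example_restore_if_present(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		dev_warn(&pdev->dev, "device gone, skipping restore\n");
		return;
	}

	pci_restore_state(pdev);
}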
08249651
RW
4294void pci_ignore_hotplug(struct pci_dev *dev)
4295{
4296 struct pci_dev *bridge = dev->bus->self;
4297
4298 dev->ignore_hotplug = 1;
4299 /* Propagate the "ignore hotplug" setting to the parent bridge. */
4300 if (bridge)
4301 bridge->ignore_hotplug = 1;
4302}
4303EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
4304
32a9a682
YS
4305#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4306static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 4307static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
4308
4309/**
4310 * pci_specified_resource_alignment - get resource alignment specified by user.
 4311 * @dev: the PCI device to check
4312 *
4313 * RETURNS: Resource alignment if it is specified.
4314 * Zero if it is not specified.
4315 */
9738abed 4316static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
32a9a682
YS
4317{
4318 int seg, bus, slot, func, align_order, count;
4319 resource_size_t align = 0;
4320 char *p;
4321
4322 spin_lock(&resource_alignment_lock);
4323 p = resource_alignment_param;
4324 while (*p) {
4325 count = 0;
4326 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
4327 p[count] == '@') {
4328 p += count + 1;
4329 } else {
4330 align_order = -1;
4331 }
4332 if (sscanf(p, "%x:%x:%x.%x%n",
4333 &seg, &bus, &slot, &func, &count) != 4) {
4334 seg = 0;
4335 if (sscanf(p, "%x:%x.%x%n",
4336 &bus, &slot, &func, &count) != 3) {
4337 /* Invalid format */
4338 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
4339 p);
4340 break;
4341 }
4342 }
4343 p += count;
4344 if (seg == pci_domain_nr(dev->bus) &&
4345 bus == dev->bus->number &&
4346 slot == PCI_SLOT(dev->devfn) &&
4347 func == PCI_FUNC(dev->devfn)) {
3c78bc61 4348 if (align_order == -1)
32a9a682 4349 align = PAGE_SIZE;
3c78bc61 4350 else
32a9a682 4351 align = 1 << align_order;
32a9a682
YS
4352 /* Found */
4353 break;
4354 }
4355 if (*p != ';' && *p != ',') {
4356 /* End of param or invalid format */
4357 break;
4358 }
4359 p++;
4360 }
4361 spin_unlock(&resource_alignment_lock);
4362 return align;
4363}
4364
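/*
 * Illustrative example of the format parsed above: an optional alignment
 * order before '@', then [<domain>:]<bus>:<slot>.<func>, with entries
 * separated by ';' or ',':
 *
 *   pci=resource_alignment=20@0000:00:02.0;04:00.1
 *
 * requests 2^20-byte (1 MiB) alignment for 0000:00:02.0 and the default
 * PAGE_SIZE alignment for 04:00.1.
 */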
2069ecfb
YL
4365/*
 4366 * This function disables memory decoding and releases memory resources
 4367 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
 4368 * It also rounds up the size to the specified alignment.
 4369 * Later on, the kernel will assign page-aligned memory resources back
 4370 * to the device.
4371 */
4372void pci_reassigndev_resource_alignment(struct pci_dev *dev)
4373{
4374 int i;
4375 struct resource *r;
4376 resource_size_t align, size;
4377 u16 command;
4378
10c463a7
YL
4379 /* check if specified PCI is target device to reassign */
4380 align = pci_specified_resource_alignment(dev);
4381 if (!align)
2069ecfb
YL
4382 return;
4383
4384 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
4385 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
4386 dev_warn(&dev->dev,
4387 "Can't reassign resources to host bridge.\n");
4388 return;
4389 }
4390
4391 dev_info(&dev->dev,
4392 "Disabling memory decoding and releasing memory resources.\n");
4393 pci_read_config_word(dev, PCI_COMMAND, &command);
4394 command &= ~PCI_COMMAND_MEMORY;
4395 pci_write_config_word(dev, PCI_COMMAND, command);
4396
2069ecfb
YL
4397 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
4398 r = &dev->resource[i];
4399 if (!(r->flags & IORESOURCE_MEM))
4400 continue;
4401 size = resource_size(r);
4402 if (size < align) {
4403 size = align;
4404 dev_info(&dev->dev,
4405 "Rounding up size of resource #%d to %#llx.\n",
4406 i, (unsigned long long)size);
4407 }
bd064f0a 4408 r->flags |= IORESOURCE_UNSET;
2069ecfb
YL
4409 r->end = size - 1;
4410 r->start = 0;
4411 }
 4412 /*
 4413 * We need to disable the bridge's resource window so that the kernel
 4414 * can reassign a new resource window later on.
 4415 */
4416 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4417 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
4418 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
4419 r = &dev->resource[i];
4420 if (!(r->flags & IORESOURCE_MEM))
4421 continue;
bd064f0a 4422 r->flags |= IORESOURCE_UNSET;
2069ecfb
YL
4423 r->end = resource_size(r) - 1;
4424 r->start = 0;
4425 }
4426 pci_disable_bridge_window(dev);
4427 }
4428}
4429
9738abed 4430static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
32a9a682
YS
4431{
4432 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4433 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4434 spin_lock(&resource_alignment_lock);
4435 strncpy(resource_alignment_param, buf, count);
4436 resource_alignment_param[count] = '\0';
4437 spin_unlock(&resource_alignment_lock);
4438 return count;
4439}
4440
9738abed 4441static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
32a9a682
YS
4442{
4443 size_t count;
4444 spin_lock(&resource_alignment_lock);
4445 count = snprintf(buf, size, "%s", resource_alignment_param);
4446 spin_unlock(&resource_alignment_lock);
4447 return count;
4448}
4449
4450static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
4451{
4452 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
4453}
4454
4455static ssize_t pci_resource_alignment_store(struct bus_type *bus,
4456 const char *buf, size_t count)
4457{
4458 return pci_set_resource_alignment_param(buf, count);
4459}
4460
4461BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
4462 pci_resource_alignment_store);
4463
4464static int __init pci_resource_alignment_sysfs_init(void)
4465{
4466 return bus_create_file(&pci_bus_type,
4467 &bus_attr_resource_alignment);
4468}
32a9a682
YS
4469late_initcall(pci_resource_alignment_sysfs_init);
4470
15856ad5 4471static void pci_no_domains(void)
32a2eea7
JG
4472{
4473#ifdef CONFIG_PCI_DOMAINS
4474 pci_domains_supported = 0;
4475#endif
4476}
4477
41e5c0f8
LD
4478#ifdef CONFIG_PCI_DOMAINS
4479static atomic_t __domain_nr = ATOMIC_INIT(-1);
4480
4481int pci_get_new_domain_nr(void)
4482{
4483 return atomic_inc_return(&__domain_nr);
4484}
7c674700
LP
4485
4486#ifdef CONFIG_PCI_DOMAINS_GENERIC
4487void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4488{
4489 static int use_dt_domains = -1;
4490 int domain = of_get_pci_domain_nr(parent->of_node);
4491
4492 /*
4493 * Check DT domain and use_dt_domains values.
4494 *
 4495 * If the DT domain property is valid (domain >= 0) and
4496 * use_dt_domains != 0, the DT assignment is valid since this means
4497 * we have not previously allocated a domain number by using
4498 * pci_get_new_domain_nr(); we should also update use_dt_domains to
4499 * 1, to indicate that we have just assigned a domain number from
4500 * DT.
4501 *
 4502 * If the DT domain property value is not valid (i.e. domain < 0), and we
4503 * have not previously assigned a domain number from DT
4504 * (use_dt_domains != 1) we should assign a domain number by
4505 * using the:
4506 *
4507 * pci_get_new_domain_nr()
4508 *
 4509 * API and update the use_dt_domains value to keep track of the method we
4510 * are using to assign domain numbers (use_dt_domains = 0).
4511 *
4512 * All other combinations imply we have a platform that is trying
4513 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
4514 * which is a recipe for domain mishandling and it is prevented by
4515 * invalidating the domain value (domain = -1) and printing a
4516 * corresponding error.
4517 */
4518 if (domain >= 0 && use_dt_domains) {
4519 use_dt_domains = 1;
4520 } else if (domain < 0 && use_dt_domains != 1) {
4521 use_dt_domains = 0;
4522 domain = pci_get_new_domain_nr();
4523 } else {
4524 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
4525 parent->of_node->full_name);
4526 domain = -1;
4527 }
4528
4529 bus->domain_nr = domain;
4530}
4531#endif
41e5c0f8
LD
4532#endif
4533
0ef5f8f6 4534/**
642c92da 4535 * pci_ext_cfg_avail - can we access extended PCI config space?
0ef5f8f6
AP
4536 *
4537 * Returns 1 if we can access PCI extended config space (offsets
4538 * greater than 0xff). This is the default implementation. Architecture
4539 * implementations can override this.
4540 */
642c92da 4541int __weak pci_ext_cfg_avail(void)
0ef5f8f6
AP
4542{
4543 return 1;
4544}
4545
2d1c8618
BH
4546void __weak pci_fixup_cardbus(struct pci_bus *bus)
4547{
4548}
4549EXPORT_SYMBOL(pci_fixup_cardbus);
4550
ad04d31e 4551static int __init pci_setup(char *str)
1da177e4
LT
4552{
4553 while (str) {
4554 char *k = strchr(str, ',');
4555 if (k)
4556 *k++ = 0;
4557 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
4558 if (!strcmp(str, "nomsi")) {
4559 pci_no_msi();
7f785763
RD
4560 } else if (!strcmp(str, "noaer")) {
4561 pci_no_aer();
b55438fd
YL
4562 } else if (!strncmp(str, "realloc=", 8)) {
4563 pci_realloc_get_opt(str + 8);
f483d392 4564 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 4565 pci_realloc_get_opt("on");
32a2eea7
JG
4566 } else if (!strcmp(str, "nodomains")) {
4567 pci_no_domains();
6748dcc2
RW
4568 } else if (!strncmp(str, "noari", 5)) {
4569 pcie_ari_disabled = true;
4516a618
AN
4570 } else if (!strncmp(str, "cbiosize=", 9)) {
4571 pci_cardbus_io_size = memparse(str + 9, &str);
4572 } else if (!strncmp(str, "cbmemsize=", 10)) {
4573 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
4574 } else if (!strncmp(str, "resource_alignment=", 19)) {
4575 pci_set_resource_alignment_param(str + 19,
4576 strlen(str + 19));
43c16408
AP
4577 } else if (!strncmp(str, "ecrc=", 5)) {
4578 pcie_ecrc_get_policy(str + 5);
28760489
EB
4579 } else if (!strncmp(str, "hpiosize=", 9)) {
4580 pci_hotplug_io_size = memparse(str + 9, &str);
4581 } else if (!strncmp(str, "hpmemsize=", 10)) {
4582 pci_hotplug_mem_size = memparse(str + 10, &str);
5f39e670
JM
4583 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4584 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
4585 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
4586 pcie_bus_config = PCIE_BUS_SAFE;
4587 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
4588 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
4589 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4590 pcie_bus_config = PCIE_BUS_PEER2PEER;
284f5f9d
BH
4591 } else if (!strncmp(str, "pcie_scan_all", 13)) {
4592 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
309e57df
MW
4593 } else {
4594 printk(KERN_ERR "PCI: Unknown option `%s'\n",
4595 str);
4596 }
1da177e4
LT
4597 }
4598 str = k;
4599 }
0637a70a 4600 return 0;
1da177e4 4601}
0637a70a 4602early_param("pci", pci_setup);
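/*
 * Illustrative example (values are arbitrary) of a kernel command line
 * combining several of the options parsed above:
 *
 *   pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=12@00:1f.2
 */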