/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

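/*
 * Example (illustrative only, not used in this file): a driver that wants
 * to read its Power Management Control/Status register could locate the PM
 * capability first.  "pdev" is a hypothetical struct pci_dev pointer:
 *
 *	u16 pmcsr;
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */
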
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

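/*
 * Example (illustrative only): looking up the Advanced Error Reporting
 * extended capability and reading its Uncorrectable Error Status register.
 * "pdev" is a hypothetical struct pci_dev pointer:
 *
 *	u32 status;
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &status);
 */
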
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

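/*
 * Example (illustrative sketch only): a platform firmware layer such as ACPI
 * registers its callbacks once at init time; afterwards the platform_pci_*()
 * wrappers above dispatch through this single set of ops.  The callback names
 * used in the initializer below are hypothetical:
 *
 *	static struct pci_platform_pm_ops acpi_pci_platform_pm = {
 *		.is_manageable	= acpi_pci_power_manageable,
 *		.set_state	= acpi_pci_set_power_state,
 *		.choose_state	= acpi_pci_choose_state,
 *		.can_wakeup	= acpi_pci_can_wakeup,
 *		.sleep_wake	= acpi_pci_sleep_wake,
 *		.run_wake	= acpi_pci_run_wake,
 *	};
 *
 *	pci_set_platform_pm(&acpi_pci_platform_pm);
 */
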
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

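/*
 * Example (illustrative only): a driver's suspend path putting a device it
 * no longer needs into D3hot, and its resume path bringing it back to D0.
 * Error handling is omitted and "pdev" is a hypothetical struct pci_dev:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */
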
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

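/*
 * Example (illustrative only): a legacy-PM driver suspend hook typically lets
 * this helper map the system-wide pm_message_t onto a device power state.
 * "foo_suspend" is a hypothetical driver callback:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */
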
#define PCI_EXP_SAVE_REGS	7


static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

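/*
 * Example (illustrative only): save/restore is also the usual bracket around
 * operations that clobber config space, such as a device reset.  "pdev" is a
 * hypothetical struct pci_dev, and the reset helper stands in for whatever
 * mechanism a driver actually uses:
 *
 *	pci_save_state(pdev);
 *	my_device_specific_reset(pdev);		(hypothetical reset helper)
 *	pci_restore_state(pdev);
 */
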
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

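/*
 * Example (illustrative only): a caller that needs to return a device to a
 * known-good configuration much later (e.g. after handing it to a guest) can
 * keep an opaque copy of the saved state.  "pdev" and "saved" are
 * hypothetical:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */
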
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

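/*
 * Example (illustrative only): the canonical probe() sequence built on the
 * helpers above.  "foo_probe", the region name, and the use of BAR 0 are
 * hypothetical:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_region(pdev, 0, "foo");
 *		if (err)
 *			goto err_disable;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */
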
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

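/*
 * Example (illustrative only): with the managed interface, the explicit
 * cleanup shown in the earlier probe sketch disappears; the device is
 * disabled automatically on driver detach.  "foo_probe" is hypothetical:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */
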
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If the bridge is in a low power state, the
				 * configuration space of subordinate devices
				 * may not be accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep.  The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set.  The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

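/*
 * Example (illustrative only): a network driver that supports Wake-on-LAN
 * typically arms or disarms wake-up in its suspend path based on user
 * configuration.  "wol_enabled" is a hypothetical driver flag:
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */
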
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

1755/**
1756 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1757 * @dev: Device to handle.
1758 *
1759 * Choose the power state appropriate for the device depending on whether
1760 * it can wake up the system and/or is power manageable by the platform
1761 * (PCI_D3hot is the default) and put the device into that state.
1762 */
1763int pci_prepare_to_sleep(struct pci_dev *dev)
1764{
1765 pci_power_t target_state = pci_target_state(dev);
1766 int error;
1767
1768 if (target_state == PCI_POWER_ERROR)
1769 return -EIO;
1770
448bd857
HY
1771 /* D3cold during system suspend/hibernate is not supported */
1772 if (target_state > PCI_D3hot)
1773 target_state = PCI_D3hot;
1774
8efb8c76 1775 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1776
404cc2d8
RW
1777 error = pci_set_power_state(dev, target_state);
1778
1779 if (error)
1780 pci_enable_wake(dev, target_state, false);
1781
1782 return error;
1783}
1784
1785/**
443bd1c4 1786 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1787 * @dev: Device to handle.
1788 *
88393161 1789 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1790 */
1791int pci_back_from_sleep(struct pci_dev *dev)
1792{
1793 pci_enable_wake(dev, PCI_D0, false);
1794 return pci_set_power_state(dev, PCI_D0);
1795}
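/*
 * Sketch of how pci_prepare_to_sleep() and pci_back_from_sleep() pair up,
 * assuming a hypothetical built-in driver with no wake-up policy of its own.
 */
#if 0
static int foo_suspend_late(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Choose the target low-power state and arm wake-up as appropriate */
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume_early(struct pci_dev *pdev)
{
	/* Disarm wake-up and put the device back into D0 */
	pci_back_from_sleep(pdev);
	pci_restore_state(pdev);
	return 0;
}
#endif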
1796
6cbf8214
RW
1797/**
1798 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1799 * @dev: PCI device being suspended.
1800 *
1801 * Prepare @dev to generate wake-up events at run time and put it into a low
1802 * power state.
1803 */
1804int pci_finish_runtime_suspend(struct pci_dev *dev)
1805{
1806 pci_power_t target_state = pci_target_state(dev);
1807 int error;
1808
1809 if (target_state == PCI_POWER_ERROR)
1810 return -EIO;
1811
448bd857
HY
1812 dev->runtime_d3cold = target_state == PCI_D3cold;
1813
6cbf8214
RW
1814 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1815
1816 error = pci_set_power_state(dev, target_state);
1817
448bd857 1818 if (error) {
6cbf8214 1819 __pci_enable_wake(dev, target_state, true, false);
448bd857
HY
1820 dev->runtime_d3cold = false;
1821 }
6cbf8214
RW
1822
1823 return error;
1824}
1825
b67ea761
RW
1826/**
1827 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1828 * @dev: Device to check.
1829 *
1830 * Return true if the device itself is capable of generating wake-up events
1831 * (through the platform or using the native PCIe PME) or if the device supports
1832 * PME and one of its upstream bridges can generate wake-up events.
1833 */
1834bool pci_dev_run_wake(struct pci_dev *dev)
1835{
1836 struct pci_bus *bus = dev->bus;
1837
1838 if (device_run_wake(&dev->dev))
1839 return true;
1840
1841 if (!dev->pme_support)
1842 return false;
1843
1844 while (bus->parent) {
1845 struct pci_dev *bridge = bus->self;
1846
1847 if (device_run_wake(&bridge->dev))
1848 return true;
1849
1850 bus = bus->parent;
1851 }
1852
1853 /* We have reached the root bus. */
1854 if (bus->bridge)
1855 return device_run_wake(bus->bridge);
1856
1857 return false;
1858}
1859EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1860
eb9d0fe4
RW
1861/**
1862 * pci_pm_init - Initialize PM functions of given PCI device
1863 * @dev: PCI device to handle.
1864 */
1865void pci_pm_init(struct pci_dev *dev)
1866{
1867 int pm;
1868 u16 pmc;
1da177e4 1869
bb910a70 1870 pm_runtime_forbid(&dev->dev);
a1e4d72c 1871 device_enable_async_suspend(&dev->dev);
e80bb09d 1872 dev->wakeup_prepared = false;
bb910a70 1873
337001b6
RW
1874 dev->pm_cap = 0;
1875
eb9d0fe4
RW
1876 /* find PCI PM capability in list */
1877 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1878 if (!pm)
50246dd4 1879 return;
eb9d0fe4
RW
1880 /* Check device's ability to generate PME# */
1881 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1882
eb9d0fe4
RW
1883 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1884 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1885 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1886 return;
eb9d0fe4
RW
1887 }
1888
337001b6 1889 dev->pm_cap = pm;
1ae861e6 1890 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 1891 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
4f9c1397 1892 dev->d3cold_allowed = true;
337001b6
RW
1893
1894 dev->d1_support = false;
1895 dev->d2_support = false;
1896 if (!pci_no_d1d2(dev)) {
c9ed77ee 1897 if (pmc & PCI_PM_CAP_D1)
337001b6 1898 dev->d1_support = true;
c9ed77ee 1899 if (pmc & PCI_PM_CAP_D2)
337001b6 1900 dev->d2_support = true;
c9ed77ee
BH
1901
1902 if (dev->d1_support || dev->d2_support)
1903 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1904 dev->d1_support ? " D1" : "",
1905 dev->d2_support ? " D2" : "");
337001b6
RW
1906 }
1907
1908 pmc &= PCI_PM_CAP_PME_MASK;
1909 if (pmc) {
10c3d71d
BH
1910 dev_printk(KERN_DEBUG, &dev->dev,
1911 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1912 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1913 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1914 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1915 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1916 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1917 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1918 dev->pme_poll = true;
eb9d0fe4
RW
1919 /*
1920 * Make device's PM flags reflect the wake-up capability, but
1921 * let the user space enable it to wake up the system as needed.
1922 */
1923 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1924 /* Disable the PME# generation functionality */
337001b6
RW
1925 pci_pme_active(dev, false);
1926 } else {
1927 dev->pme_support = 0;
eb9d0fe4 1928 }
1da177e4
LT
1929}
1930
eb9c39d0
JB
1931/**
1932 * platform_pci_wakeup_init - init platform wakeup if present
1933 * @dev: PCI device
1934 *
1935 * Some devices don't have PCI PM caps but can still generate wakeup
1936 * events through platform methods (like ACPI events). If @dev supports
1937 * platform wakeup events, set the device flag to indicate as much. This
1938 * may be redundant if the device also supports PCI PM caps, but double
1939 * initialization should be safe in that case.
1940 */
1941void platform_pci_wakeup_init(struct pci_dev *dev)
1942{
1943 if (!platform_pci_can_wakeup(dev))
1944 return;
1945
1946 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1947 platform_pci_sleep_wake(dev, false);
1948}
1949
34a4876e
YL
1950static void pci_add_saved_cap(struct pci_dev *pci_dev,
1951 struct pci_cap_saved_state *new_cap)
1952{
1953 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1954}
1955
63f4898a
RW
1956/**
1957 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1958 * @dev: the PCI device
1959 * @cap: the capability to allocate the buffer for
1960 * @size: requested size of the buffer
1961 */
1962static int pci_add_cap_save_buffer(
1963 struct pci_dev *dev, char cap, unsigned int size)
1964{
1965 int pos;
1966 struct pci_cap_saved_state *save_state;
1967
1968 pos = pci_find_capability(dev, cap);
1969 if (pos <= 0)
1970 return 0;
1971
1972 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1973 if (!save_state)
1974 return -ENOMEM;
1975
24a4742f
AW
1976 save_state->cap.cap_nr = cap;
1977 save_state->cap.size = size;
63f4898a
RW
1978 pci_add_saved_cap(dev, save_state);
1979
1980 return 0;
1981}
1982
1983/**
1984 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1985 * @dev: the PCI device
1986 */
1987void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1988{
1989 int error;
1990
89858517
YZ
1991 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1992 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1993 if (error)
1994 dev_err(&dev->dev,
1995 "unable to preallocate PCI Express save buffer\n");
1996
1997 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1998 if (error)
1999 dev_err(&dev->dev,
2000 "unable to preallocate PCI-X save buffer\n");
2001}
2002
f796841e
YL
2003void pci_free_cap_save_buffers(struct pci_dev *dev)
2004{
2005 struct pci_cap_saved_state *tmp;
2006 struct hlist_node *pos, *n;
2007
2008 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
2009 kfree(tmp);
2010}
2011
58c3a727
YZ
2012/**
2013 * pci_enable_ari - enable ARI forwarding if hardware supports it
2014 * @dev: the PCI device
2015 */
2016void pci_enable_ari(struct pci_dev *dev)
2017{
58c3a727 2018 u32 cap;
8113587c 2019 struct pci_dev *bridge;
58c3a727 2020
6748dcc2 2021 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2022 return;
2023
59875ae4 2024 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
58c3a727
YZ
2025 return;
2026
8113587c 2027 bridge = dev->bus->self;
cb97ae34 2028 if (!bridge)
8113587c
ZY
2029 return;
2030
59875ae4 2031 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2032 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2033 return;
2034
59875ae4 2035 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
8113587c 2036 bridge->ari_enabled = 1;
58c3a727
YZ
2037}
2038
b48d4425 2039/**
c463b8cb 2040 * pci_enable_ido - enable ID-based Ordering on a device
b48d4425
JB
2041 * @dev: the PCI device
2042 * @type: which types of IDO to enable
2043 *
2044 * Enable ID-based ordering on @dev. @type can contain the bits
2045 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2046 * which types of transactions are allowed to be re-ordered.
2047 */
2048void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2049{
59875ae4 2050 u16 ctrl = 0;
b48d4425 2051
b48d4425
JB
2052 if (type & PCI_EXP_IDO_REQUEST)
2053 ctrl |= PCI_EXP_IDO_REQ_EN;
2054 if (type & PCI_EXP_IDO_COMPLETION)
2055 ctrl |= PCI_EXP_IDO_CMP_EN;
59875ae4
JL
2056 if (ctrl)
2057 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
b48d4425
JB
2058}
2059EXPORT_SYMBOL(pci_enable_ido);
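/*
 * Usage sketch (hypothetical caller): allow both request and completion
 * TLPs from this endpoint to be reordered on the basis of their IDs, and
 * undo it again on teardown.
 */
#if 0
static void foo_setup_ido(struct pci_dev *pdev)
{
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}

static void foo_teardown_ido(struct pci_dev *pdev)
{
	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}
#endif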
2060
2061/**
2062 * pci_disable_ido - disable ID-based ordering on a device
2063 * @dev: the PCI device
2064 * @type: which types of IDO to disable
2065 */
2066void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2067{
59875ae4 2068 u16 ctrl = 0;
b48d4425 2069
b48d4425 2070 if (type & PCI_EXP_IDO_REQUEST)
59875ae4 2071 ctrl |= PCI_EXP_IDO_REQ_EN;
b48d4425 2072 if (type & PCI_EXP_IDO_COMPLETION)
59875ae4
JL
2073 ctrl |= PCI_EXP_IDO_CMP_EN;
2074 if (ctrl)
2075 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
b48d4425
JB
2076}
2077EXPORT_SYMBOL(pci_disable_ido);
2078
48a92a81
JB
2079/**
2080 * pci_enable_obff - enable optimized buffer flush/fill
2081 * @dev: PCI device
2082 * @type: type of signaling to use
2083 *
2084 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2085 * signaling if possible, falling back to message signaling only if
2086 * WAKE# isn't supported. @type should indicate whether the PCIe link
2087 * should be brought out of L0s or L1 to send the message. It should be
2088 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2089 *
2090 * If your device can benefit from receiving all messages, even at the
2091 * power cost of bringing the link back up from a low power state, use
2092 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2093 * preferred type).
2094 *
2095 * RETURNS:
2096 * Zero on success, appropriate error number on failure.
2097 */
2098int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2099{
48a92a81
JB
2100 u32 cap;
2101 u16 ctrl;
2102 int ret;
2103
59875ae4 2104 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
48a92a81
JB
2105 if (!(cap & PCI_EXP_OBFF_MASK))
2106 return -ENOTSUPP; /* no OBFF support at all */
2107
2108 /* Make sure the topology supports OBFF as well */
8291550f 2109 if (dev->bus->self) {
48a92a81
JB
2110 ret = pci_enable_obff(dev->bus->self, type);
2111 if (ret)
2112 return ret;
2113 }
2114
59875ae4 2115 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
48a92a81
JB
2116 if (cap & PCI_EXP_OBFF_WAKE)
2117 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2118 else {
2119 switch (type) {
2120 case PCI_EXP_OBFF_SIGNAL_L0:
2121 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2122 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2123 break;
2124 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2125 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2126 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2127 break;
2128 default:
2129 WARN(1, "bad OBFF signal type\n");
2130 return -ENOTSUPP;
2131 }
2132 }
59875ae4 2133 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);
48a92a81
JB
2134
2135 return 0;
2136}
2137EXPORT_SYMBOL(pci_enable_obff);
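/*
 * Usage sketch, assuming a hypothetical driver that prefers not to pull the
 * link out of L0s/L1 just to receive OBFF messages.
 */
#if 0
static void foo_setup_obff(struct pci_dev *pdev)
{
	/* WAKE# is used when the hierarchy supports it, else message signaling */
	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
		dev_info(&pdev->dev, "OBFF not available\n");
}
#endif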
2138
2139/**
2140 * pci_disable_obff - disable optimized buffer flush/fill
2141 * @dev: PCI device
2142 *
2143 * Disable OBFF on @dev.
2144 */
2145void pci_disable_obff(struct pci_dev *dev)
2146{
59875ae4 2147 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
48a92a81
JB
2148}
2149EXPORT_SYMBOL(pci_disable_obff);
2150
51c2e0a7
JB
2151/**
2152 * pci_ltr_supported - check whether a device supports LTR
2153 * @dev: PCI device
2154 *
2155 * RETURNS:
2156 * True if @dev supports latency tolerance reporting, false otherwise.
2157 */
c32823f8 2158static bool pci_ltr_supported(struct pci_dev *dev)
51c2e0a7 2159{
51c2e0a7
JB
2160 u32 cap;
2161
59875ae4 2162 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
51c2e0a7
JB
2163
2164 return cap & PCI_EXP_DEVCAP2_LTR;
2165}
51c2e0a7
JB
2166
2167/**
2168 * pci_enable_ltr - enable latency tolerance reporting
2169 * @dev: PCI device
2170 *
2171 * Enable LTR on @dev if possible, which means enabling it first on
2172 * upstream ports.
2173 *
2174 * RETURNS:
2175 * Zero on success, errno on failure.
2176 */
2177int pci_enable_ltr(struct pci_dev *dev)
2178{
51c2e0a7
JB
2179 int ret;
2180
51c2e0a7
JB
2181 /* Only primary function can enable/disable LTR */
2182 if (PCI_FUNC(dev->devfn) != 0)
2183 return -EINVAL;
2184
59875ae4
JL
2185 if (!pci_ltr_supported(dev))
2186 return -ENOTSUPP;
2187
51c2e0a7 2188 /* Enable upstream ports first */
8291550f 2189 if (dev->bus->self) {
51c2e0a7
JB
2190 ret = pci_enable_ltr(dev->bus->self);
2191 if (ret)
2192 return ret;
2193 }
2194
59875ae4 2195 return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
51c2e0a7
JB
2196}
2197EXPORT_SYMBOL(pci_enable_ltr);
2198
2199/**
2200 * pci_disable_ltr - disable latency tolerance reporting
2201 * @dev: PCI device
2202 */
2203void pci_disable_ltr(struct pci_dev *dev)
2204{
51c2e0a7
JB
2205 /* Only primary function can enable/disable LTR */
2206 if (PCI_FUNC(dev->devfn) != 0)
2207 return;
2208
59875ae4
JL
2209 if (!pci_ltr_supported(dev))
2210 return;
2211
2212 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
51c2e0a7
JB
2213}
2214EXPORT_SYMBOL(pci_disable_ltr);
2215
2216static int __pci_ltr_scale(int *val)
2217{
2218 int scale = 0;
2219
2220 while (*val > 1023) {
2221 *val = (*val + 31) / 32;
2222 scale++;
2223 }
2224 return scale;
2225}
2226
2227/**
2228 * pci_set_ltr - set LTR latency values
2229 * @dev: PCI device
2230 * @snoop_lat_ns: snoop latency in nanoseconds
2231 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2232 *
2233 * Figure out the scale and set the LTR values accordingly.
2234 */
2235int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2236{
2237 int pos, ret, snoop_scale, nosnoop_scale;
2238 u16 val;
2239
2240 if (!pci_ltr_supported(dev))
2241 return -ENOTSUPP;
2242
2243 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2244 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2245
2246 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2247 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2248 return -EINVAL;
2249
2250 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2251 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2252 return -EINVAL;
2253
2254 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2255 if (!pos)
2256 return -ENOTSUPP;
2257
2258 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2259 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2260 if (ret)
2261 return -EIO;
2262
2263 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2264 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2265 if (ret)
2266 return -EIO;
2267
2268 return 0;
2269}
2270EXPORT_SYMBOL(pci_set_ltr);
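/*
 * Usage sketch with made-up latencies: enable LTR up the hierarchy, then
 * report roughly 1 us snoop / 4 us no-snoop tolerance.  The values are
 * illustrative, not a recommendation.
 */
#if 0
static void foo_setup_ltr(struct pci_dev *pdev)
{
	if (pci_enable_ltr(pdev) == 0)
		pci_set_ltr(pdev, 1000, 4000);	/* nanoseconds */
}
#endif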
2271
5d990b62
CW
2272static int pci_acs_enable;
2273
2274/**
2275 * pci_request_acs - ask for ACS to be enabled if supported
2276 */
2277void pci_request_acs(void)
2278{
2279 pci_acs_enable = 1;
2280}
2281
ae21ee65
AK
2282/**
2283 * pci_enable_acs - enable ACS if hardware supports it
2284 * @dev: the PCI device
2285 */
2286void pci_enable_acs(struct pci_dev *dev)
2287{
2288 int pos;
2289 u16 cap;
2290 u16 ctrl;
2291
5d990b62
CW
2292 if (!pci_acs_enable)
2293 return;
2294
ae21ee65
AK
2295 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2296 if (!pos)
2297 return;
2298
2299 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2300 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2301
2302 /* Source Validation */
2303 ctrl |= (cap & PCI_ACS_SV);
2304
2305 /* P2P Request Redirect */
2306 ctrl |= (cap & PCI_ACS_RR);
2307
2308 /* P2P Completion Redirect */
2309 ctrl |= (cap & PCI_ACS_CR);
2310
2311 /* Upstream Forwarding */
2312 ctrl |= (cap & PCI_ACS_UF);
2313
2314 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2315}
2316
ad805758
AW
2317/**
2318 * pci_acs_enabled - test ACS against required flags for a given device
2319 * @pdev: device to test
2320 * @acs_flags: required PCI ACS flags
2321 *
2322 * Return true if the device supports the provided flags. Automatically
2323 * filters out flags that are not implemented on multifunction devices.
2324 */
2325bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2326{
2327 int pos, ret;
2328 u16 ctrl;
2329
2330 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2331 if (ret >= 0)
2332 return ret > 0;
2333
2334 if (!pci_is_pcie(pdev))
2335 return false;
2336
2337 /* Filter out flags not applicable to multifunction */
2338 if (pdev->multifunction)
2339 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2340 PCI_ACS_EC | PCI_ACS_DT);
2341
62f87c0e
YW
2342 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2343 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
ad805758
AW
2344 pdev->multifunction) {
2345 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2346 if (!pos)
2347 return false;
2348
2349 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2350 if ((ctrl & acs_flags) != acs_flags)
2351 return false;
2352 }
2353
2354 return true;
2355}
2356
2357/**
2358 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2359 * @start: starting downstream device
2360 * @end: ending upstream device or NULL to search to the root bus
2361 * @acs_flags: required flags
2362 *
2363 * Walk up a device tree from start to end testing PCI ACS support. If
2364 * any step along the way does not support the required flags, return false.
2365 */
2366bool pci_acs_path_enabled(struct pci_dev *start,
2367 struct pci_dev *end, u16 acs_flags)
2368{
2369 struct pci_dev *pdev, *parent = start;
2370
2371 do {
2372 pdev = parent;
2373
2374 if (!pci_acs_enabled(pdev, acs_flags))
2375 return false;
2376
2377 if (pci_is_root_bus(pdev->bus))
2378 return (end == NULL);
2379
2380 parent = pdev->bus->self;
2381 } while (pdev != end);
2382
2383 return true;
2384}
2385
57c2cf71
BH
2386/**
2387 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2388 * @dev: the PCI device
2389 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2390 *
2391 * Perform INTx swizzling for a device behind one level of bridge. This is
2392 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2393 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2394 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2395 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2396 */
3df425f3 2397u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2398{
46b952a3
MW
2399 int slot;
2400
2401 if (pci_ari_enabled(dev->bus))
2402 slot = 0;
2403 else
2404 slot = PCI_SLOT(dev->devfn);
2405
2406 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2407}
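/*
 * Worked example of the swizzle above: a device in slot 2 asserting INTB
 * (pin 2) maps to (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the upstream
 * side of the bridge.  With ARI the slot is taken as 0, so the pin is
 * passed through unchanged.
 */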
2408
1da177e4
LT
2409int
2410pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2411{
2412 u8 pin;
2413
514d207d 2414 pin = dev->pin;
1da177e4
LT
2415 if (!pin)
2416 return -1;
878f2e50 2417
8784fd4d 2418 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2419 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2420 dev = dev->bus->self;
2421 }
2422 *bridge = dev;
2423 return pin;
2424}
2425
68feac87
BH
2426/**
2427 * pci_common_swizzle - swizzle INTx all the way to root bridge
2428 * @dev: the PCI device
2429 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2430 *
2431 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2432 * bridges all the way up to a PCI root bus.
2433 */
2434u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2435{
2436 u8 pin = *pinp;
2437
1eb39487 2438 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2439 pin = pci_swizzle_interrupt_pin(dev, pin);
2440 dev = dev->bus->self;
2441 }
2442 *pinp = pin;
2443 return PCI_SLOT(dev->devfn);
2444}
2445
1da177e4
LT
2446/**
2447 * pci_release_region - Release a PCI bar
2448 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2449 * @bar: BAR to release
2450 *
2451 * Releases the PCI I/O and memory resources previously reserved by a
2452 * successful call to pci_request_region. Call this function only
2453 * after all use of the PCI regions has ceased.
2454 */
2455void pci_release_region(struct pci_dev *pdev, int bar)
2456{
9ac7849e
TH
2457 struct pci_devres *dr;
2458
1da177e4
LT
2459 if (pci_resource_len(pdev, bar) == 0)
2460 return;
2461 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2462 release_region(pci_resource_start(pdev, bar),
2463 pci_resource_len(pdev, bar));
2464 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2465 release_mem_region(pci_resource_start(pdev, bar),
2466 pci_resource_len(pdev, bar));
9ac7849e
TH
2467
2468 dr = find_pci_dr(pdev);
2469 if (dr)
2470 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2471}
2472
2473/**
f5ddcac4 2474 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2475 * @pdev: PCI device whose resources are to be reserved
2476 * @bar: BAR to be reserved
2477 * @res_name: Name to be associated with resource.
f5ddcac4 2478 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2479 *
2480 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2481 * being reserved by owner @res_name. Do not access any
2482 * address inside the PCI regions unless this call returns
2483 * successfully.
2484 *
f5ddcac4
RD
2485 * If @exclusive is set, then the region is marked so that userspace
2486 * is explicitly not allowed to map the resource via /dev/mem or
2487 * sysfs MMIO access.
2488 *
1da177e4
LT
2489 * Returns 0 on success, or %EBUSY on error. A warning
2490 * message is also printed on failure.
2491 */
e8de1481
AV
2492static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2493 int exclusive)
1da177e4 2494{
9ac7849e
TH
2495 struct pci_devres *dr;
2496
1da177e4
LT
2497 if (pci_resource_len(pdev, bar) == 0)
2498 return 0;
2499
2500 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2501 if (!request_region(pci_resource_start(pdev, bar),
2502 pci_resource_len(pdev, bar), res_name))
2503 goto err_out;
2504 }
2505 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2506 if (!__request_mem_region(pci_resource_start(pdev, bar),
2507 pci_resource_len(pdev, bar), res_name,
2508 exclusive))
1da177e4
LT
2509 goto err_out;
2510 }
9ac7849e
TH
2511
2512 dr = find_pci_dr(pdev);
2513 if (dr)
2514 dr->region_mask |= 1 << bar;
2515
1da177e4
LT
2516 return 0;
2517
2518err_out:
c7dabef8 2519 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2520 &pdev->resource[bar]);
1da177e4
LT
2521 return -EBUSY;
2522}
2523
e8de1481 2524/**
f5ddcac4 2525 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2526 * @pdev: PCI device whose resources are to be reserved
2527 * @bar: BAR to be reserved
f5ddcac4 2528 * @res_name: Name to be associated with resource
e8de1481 2529 *
f5ddcac4 2530 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2531 * being reserved by owner @res_name. Do not access any
2532 * address inside the PCI regions unless this call returns
2533 * successfully.
2534 *
2535 * Returns 0 on success, or %EBUSY on error. A warning
2536 * message is also printed on failure.
2537 */
2538int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2539{
2540 return __pci_request_region(pdev, bar, res_name, 0);
2541}
2542
2543/**
2544 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2545 * @pdev: PCI device whose resources are to be reserved
2546 * @bar: BAR to be reserved
2547 * @res_name: Name to be associated with resource.
2548 *
2549 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2550 * being reserved by owner @res_name. Do not access any
2551 * address inside the PCI regions unless this call returns
2552 * successfully.
2553 *
2554 * Returns 0 on success, or %EBUSY on error. A warning
2555 * message is also printed on failure.
2556 *
2557 * The key difference that _exclusive makes is that userspace is
2558 * explicitly not allowed to map the resource via /dev/mem or
2559 * sysfs.
2560 */
2561int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2562{
2563 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2564}
c87deff7
HS
2565/**
2566 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2567 * @pdev: PCI device whose resources were previously reserved
2568 * @bars: Bitmask of BARs to be released
2569 *
2570 * Release selected PCI I/O and memory resources previously reserved.
2571 * Call this function only after all use of the PCI regions has ceased.
2572 */
2573void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2574{
2575 int i;
2576
2577 for (i = 0; i < 6; i++)
2578 if (bars & (1 << i))
2579 pci_release_region(pdev, i);
2580}
2581
e8de1481
AV
2582int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2583 const char *res_name, int excl)
c87deff7
HS
2584{
2585 int i;
2586
2587 for (i = 0; i < 6; i++)
2588 if (bars & (1 << i))
e8de1481 2589 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2590 goto err_out;
2591 return 0;
2592
2593err_out:
2594 while(--i >= 0)
2595 if (bars & (1 << i))
2596 pci_release_region(pdev, i);
2597
2598 return -EBUSY;
2599}
1da177e4 2600
e8de1481
AV
2601
2602/**
2603 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2604 * @pdev: PCI device whose resources are to be reserved
2605 * @bars: Bitmask of BARs to be requested
2606 * @res_name: Name to be associated with resource
2607 */
2608int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2609 const char *res_name)
2610{
2611 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2612}
2613
2614int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2615 int bars, const char *res_name)
2616{
2617 return __pci_request_selected_regions(pdev, bars, res_name,
2618 IORESOURCE_EXCLUSIVE);
2619}
2620
1da177e4
LT
2621/**
2622 * pci_release_regions - Release reserved PCI I/O and memory resources
2623 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2624 *
2625 * Releases all PCI I/O and memory resources previously reserved by a
2626 * successful call to pci_request_regions. Call this function only
2627 * after all use of the PCI regions has ceased.
2628 */
2629
2630void pci_release_regions(struct pci_dev *pdev)
2631{
c87deff7 2632 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2633}
2634
2635/**
2636 * pci_request_regions - Reserve PCI I/O and memory resources
2637 * @pdev: PCI device whose resources are to be reserved
2638 * @res_name: Name to be associated with resource.
2639 *
2640 * Mark all PCI regions associated with PCI device @pdev as
2641 * being reserved by owner @res_name. Do not access any
2642 * address inside the PCI regions unless this call returns
2643 * successfully.
2644 *
2645 * Returns 0 on success, or %EBUSY on error. A warning
2646 * message is also printed on failure.
2647 */
3c990e92 2648int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2649{
c87deff7 2650 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2651}
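/*
 * Typical probe-time usage, assuming a hypothetical "foo" driver; the
 * matching pci_release_regions()/pci_disable_device() calls belong in
 * remove() as well as in the error path shown here.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif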
2652
e8de1481
AV
2653/**
2654 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2655 * @pdev: PCI device whose resources are to be reserved
2656 * @res_name: Name to be associated with resource.
2657 *
2658 * Mark all PCI regions associated with PCI device @pdev as
2659 * being reserved by owner @res_name. Do not access any
2660 * address inside the PCI regions unless this call returns
2661 * successfully.
2662 *
2663 * pci_request_regions_exclusive() will mark the region so that
2664 * /dev/mem and the sysfs MMIO access will not be allowed.
2665 *
2666 * Returns 0 on success, or %EBUSY on error. A warning
2667 * message is also printed on failure.
2668 */
2669int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2670{
2671 return pci_request_selected_regions_exclusive(pdev,
2672 ((1 << 6) - 1), res_name);
2673}
2674
6a479079
BH
2675static void __pci_set_master(struct pci_dev *dev, bool enable)
2676{
2677 u16 old_cmd, cmd;
2678
2679 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2680 if (enable)
2681 cmd = old_cmd | PCI_COMMAND_MASTER;
2682 else
2683 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2684 if (cmd != old_cmd) {
2685 dev_dbg(&dev->dev, "%s bus mastering\n",
2686 enable ? "enabling" : "disabling");
2687 pci_write_config_word(dev, PCI_COMMAND, cmd);
2688 }
2689 dev->is_busmaster = enable;
2690}
e8de1481 2691
2b6f2c35
MS
2692/**
2693 * pcibios_setup - process "pci=" kernel boot arguments
2694 * @str: string used to pass in "pci=" kernel boot arguments
2695 *
2696 * Process kernel boot arguments. This is the default implementation.
2697 * Architecture specific implementations can override this as necessary.
2698 */
2699char * __weak __init pcibios_setup(char *str)
2700{
2701 return str;
2702}
2703
96c55900
MS
2704/**
2705 * pcibios_set_master - enable PCI bus-mastering for device dev
2706 * @dev: the PCI device to enable
2707 *
2708 * Enables PCI bus-mastering for the device. This is the default
2709 * implementation. Architecture specific implementations can override
2710 * this if necessary.
2711 */
2712void __weak pcibios_set_master(struct pci_dev *dev)
2713{
2714 u8 lat;
2715
f676678f
MS
2716 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2717 if (pci_is_pcie(dev))
2718 return;
2719
96c55900
MS
2720 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2721 if (lat < 16)
2722 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2723 else if (lat > pcibios_max_latency)
2724 lat = pcibios_max_latency;
2725 else
2726 return;
2727 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2728 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2729}
2730
1da177e4
LT
2731/**
2732 * pci_set_master - enables bus-mastering for device dev
2733 * @dev: the PCI device to enable
2734 *
2735 * Enables bus-mastering on the device and calls pcibios_set_master()
2736 * to do the needed arch specific settings.
2737 */
6a479079 2738void pci_set_master(struct pci_dev *dev)
1da177e4 2739{
6a479079 2740 __pci_set_master(dev, true);
1da177e4
LT
2741 pcibios_set_master(dev);
2742}
2743
6a479079
BH
2744/**
2745 * pci_clear_master - disables bus-mastering for device dev
2746 * @dev: the PCI device to disable
2747 */
2748void pci_clear_master(struct pci_dev *dev)
2749{
2750 __pci_set_master(dev, false);
2751}
2752
1da177e4 2753/**
edb2d97e
MW
2754 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2755 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2756 *
edb2d97e
MW
2757 * Helper function for pci_set_mwi.
2758 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2759 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2760 *
2761 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2762 */
15ea76d4 2763int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2764{
2765 u8 cacheline_size;
2766
2767 if (!pci_cache_line_size)
15ea76d4 2768 return -EINVAL;
1da177e4
LT
2769
2770 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2771 equal to or a multiple of the right value. */
2772 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2773 if (cacheline_size >= pci_cache_line_size &&
2774 (cacheline_size % pci_cache_line_size) == 0)
2775 return 0;
2776
2777 /* Write the correct value. */
2778 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2779 /* Read it back. */
2780 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2781 if (cacheline_size == pci_cache_line_size)
2782 return 0;
2783
80ccba11
BH
2784 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2785 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2786
2787 return -EINVAL;
2788}
15ea76d4
TH
2789EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2790
2791#ifdef PCI_DISABLE_MWI
2792int pci_set_mwi(struct pci_dev *dev)
2793{
2794 return 0;
2795}
2796
2797int pci_try_set_mwi(struct pci_dev *dev)
2798{
2799 return 0;
2800}
2801
2802void pci_clear_mwi(struct pci_dev *dev)
2803{
2804}
2805
2806#else
1da177e4
LT
2807
2808/**
2809 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2810 * @dev: the PCI device for which MWI is enabled
2811 *
694625c0 2812 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2813 *
2814 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2815 */
2816int
2817pci_set_mwi(struct pci_dev *dev)
2818{
2819 int rc;
2820 u16 cmd;
2821
edb2d97e 2822 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2823 if (rc)
2824 return rc;
2825
2826 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2827 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2828 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2829 cmd |= PCI_COMMAND_INVALIDATE;
2830 pci_write_config_word(dev, PCI_COMMAND, cmd);
2831 }
2832
2833 return 0;
2834}
2835
694625c0
RD
2836/**
2837 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2838 * @dev: the PCI device for which MWI is enabled
2839 *
2840 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2841 * Callers are not required to check the return value.
2842 *
2843 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2844 */
2845int pci_try_set_mwi(struct pci_dev *dev)
2846{
2847 int rc = pci_set_mwi(dev);
2848 return rc;
2849}
2850
1da177e4
LT
2851/**
2852 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2853 * @dev: the PCI device to disable
2854 *
2855 * Disables PCI Memory-Write-Invalidate transaction on the device
2856 */
2857void
2858pci_clear_mwi(struct pci_dev *dev)
2859{
2860 u16 cmd;
2861
2862 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2863 if (cmd & PCI_COMMAND_INVALIDATE) {
2864 cmd &= ~PCI_COMMAND_INVALIDATE;
2865 pci_write_config_word(dev, PCI_COMMAND, cmd);
2866 }
2867}
edb2d97e 2868#endif /* ! PCI_DISABLE_MWI */
1da177e4 2869
a04ce0ff
BR
2870/**
2871 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2872 * @pdev: the PCI device to operate on
2873 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2874 *
2875 * Enables/disables PCI INTx for device dev
2876 */
2877void
2878pci_intx(struct pci_dev *pdev, int enable)
2879{
2880 u16 pci_command, new;
2881
2882 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2883
2884 if (enable) {
2885 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2886 } else {
2887 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2888 }
2889
2890 if (new != pci_command) {
9ac7849e
TH
2891 struct pci_devres *dr;
2892
2fd9d74b 2893 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2894
2895 dr = find_pci_dr(pdev);
2896 if (dr && !dr->restore_intx) {
2897 dr->restore_intx = 1;
2898 dr->orig_intx = !enable;
2899 }
a04ce0ff
BR
2900 }
2901}
2902
a2e27787
JK
2903/**
2904 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2905 * @dev: the PCI device to operate on
a2e27787
JK
2906 *
2907 * Check if the device dev supports INTx masking via the config space
2908 * command word.
2909 */
2910bool pci_intx_mask_supported(struct pci_dev *dev)
2911{
2912 bool mask_supported = false;
2913 u16 orig, new;
2914
fbebb9fd
BH
2915 if (dev->broken_intx_masking)
2916 return false;
2917
a2e27787
JK
2918 pci_cfg_access_lock(dev);
2919
2920 pci_read_config_word(dev, PCI_COMMAND, &orig);
2921 pci_write_config_word(dev, PCI_COMMAND,
2922 orig ^ PCI_COMMAND_INTX_DISABLE);
2923 pci_read_config_word(dev, PCI_COMMAND, &new);
2924
2925 /*
2926 * There's no way to protect against hardware bugs or detect them
2927 * reliably, but as long as we know what the value should be, let's
2928 * go ahead and check it.
2929 */
2930 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2931 dev_err(&dev->dev, "Command register changed from "
2932 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2933 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2934 mask_supported = true;
2935 pci_write_config_word(dev, PCI_COMMAND, orig);
2936 }
2937
2938 pci_cfg_access_unlock(dev);
2939 return mask_supported;
2940}
2941EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2942
2943static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2944{
2945 struct pci_bus *bus = dev->bus;
2946 bool mask_updated = true;
2947 u32 cmd_status_dword;
2948 u16 origcmd, newcmd;
2949 unsigned long flags;
2950 bool irq_pending;
2951
2952 /*
2953 * We do a single dword read to retrieve both command and status.
2954 * Document assumptions that make this possible.
2955 */
2956 BUILD_BUG_ON(PCI_COMMAND % 4);
2957 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2958
2959 raw_spin_lock_irqsave(&pci_lock, flags);
2960
2961 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2962
2963 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2964
2965 /*
2966 * Check interrupt status register to see whether our device
2967 * triggered the interrupt (when masking) or the next IRQ is
2968 * already pending (when unmasking).
2969 */
2970 if (mask != irq_pending) {
2971 mask_updated = false;
2972 goto done;
2973 }
2974
2975 origcmd = cmd_status_dword;
2976 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2977 if (mask)
2978 newcmd |= PCI_COMMAND_INTX_DISABLE;
2979 if (newcmd != origcmd)
2980 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2981
2982done:
2983 raw_spin_unlock_irqrestore(&pci_lock, flags);
2984
2985 return mask_updated;
2986}
2987
2988/**
2989 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2990 * @dev: the PCI device to operate on
a2e27787
JK
2991 *
2992 * Check if the device dev has its INTx line asserted, mask it and
2993 * return true in that case. False is returned if no interrupt was
2994 * pending.
2995 */
2996bool pci_check_and_mask_intx(struct pci_dev *dev)
2997{
2998 return pci_check_and_set_intx_mask(dev, true);
2999}
3000EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3001
3002/**
3003 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3004 * @dev: the PCI device to operate on
a2e27787
JK
3005 *
3006 * Check if the device dev has its INTx line asserted, unmask it if not
3007 * and return true. False is returned and the mask remains active if
3008 * there was still an interrupt pending.
3009 */
3010bool pci_check_and_unmask_intx(struct pci_dev *dev)
3011{
3012 return pci_check_and_set_intx_mask(dev, false);
3013}
3014EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
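/*
 * Sketch of the intended use from a hypothetical driver's interrupt
 * handler on a shared INTx line: mask the interrupt at the device while
 * the event is handed off, and unmask it once it has been serviced.
 */
#if 0
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Returns false if the interrupt was not raised by this device */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* ... acknowledge the event in device-specific registers ... */

	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}
#endif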
3015
f5f2b131
EB
3016/**
3017 * pci_msi_off - disables any msi or msix capabilities
8d7d86e9 3018 * @dev: the PCI device to operate on
f5f2b131
EB
3019 *
3020 * If you want to use msi see pci_enable_msi and friends.
3021 * This is a lower level primitive that allows us to disable
3022 * msi operation at the device level.
3023 */
3024void pci_msi_off(struct pci_dev *dev)
3025{
3026 int pos;
3027 u16 control;
3028
3029 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3030 if (pos) {
3031 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3032 control &= ~PCI_MSI_FLAGS_ENABLE;
3033 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3034 }
3035 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3036 if (pos) {
3037 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3038 control &= ~PCI_MSIX_FLAGS_ENABLE;
3039 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3040 }
3041}
b03214d5 3042EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3043
4d57cdfa
FT
3044int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3045{
3046 return dma_set_max_seg_size(&dev->dev, size);
3047}
3048EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3049
59fc67de
FT
3050int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3051{
3052 return dma_set_seg_boundary(&dev->dev, mask);
3053}
3054EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3055
8c1c699f 3056static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3057{
8c1c699f 3058 int i;
8dd7f803 3059 u32 cap;
59875ae4 3060 u16 status;
8c1c699f 3061
59875ae4 3062 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3063 if (!(cap & PCI_EXP_DEVCAP_FLR))
3064 return -ENOTTY;
3065
d91cdc74
SY
3066 if (probe)
3067 return 0;
3068
8dd7f803 3069 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3070 for (i = 0; i < 4; i++) {
3071 if (i)
3072 msleep((1 << (i - 1)) * 100);
5fe5db05 3073
59875ae4 3074 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
8c1c699f
YZ
3075 if (!(status & PCI_EXP_DEVSTA_TRPND))
3076 goto clear;
3077 }
3078
3079 dev_err(&dev->dev, "transaction is not cleared; "
3080 "proceeding with reset anyway\n");
3081
3082clear:
59875ae4 3083 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
04b55c47 3084
8c1c699f 3085 msleep(100);
8dd7f803 3086
8dd7f803
SY
3087 return 0;
3088}
d91cdc74 3089
8c1c699f 3090static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3091{
8c1c699f
YZ
3092 int i;
3093 int pos;
1ca88797 3094 u8 cap;
8c1c699f 3095 u8 status;
1ca88797 3096
8c1c699f
YZ
3097 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3098 if (!pos)
1ca88797 3099 return -ENOTTY;
8c1c699f
YZ
3100
3101 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3102 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3103 return -ENOTTY;
3104
3105 if (probe)
3106 return 0;
3107
1ca88797 3108 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3109 for (i = 0; i < 4; i++) {
3110 if (i)
3111 msleep((1 << (i - 1)) * 100);
3112
3113 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3114 if (!(status & PCI_AF_STATUS_TP))
3115 goto clear;
3116 }
5fe5db05 3117
8c1c699f
YZ
3118 dev_err(&dev->dev, "transaction is not cleared; "
3119 "proceeding with reset anyway\n");
5fe5db05 3120
8c1c699f
YZ
3121clear:
3122 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3123 msleep(100);
8c1c699f 3124
1ca88797
SY
3125 return 0;
3126}
3127
83d74e03
RW
3128/**
3129 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3130 * @dev: Device to reset.
3131 * @probe: If set, only check if the device can be reset this way.
3132 *
3133 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3134 * unset, it will be reinitialized internally when going from PCI_D3hot to
3135 * PCI_D0. If that's the case and the device is not in a low-power state
3136 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3137 *
3138 * NOTE: This causes the caller to sleep for twice the device power transition
3139 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3140 * by default (i.e. unless the @dev's d3_delay field has a different value).
3141 * Moreover, only devices in D0 can be reset by this function.
3142 */
f85876ba 3143static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3144{
f85876ba
YZ
3145 u16 csr;
3146
3147 if (!dev->pm_cap)
3148 return -ENOTTY;
d91cdc74 3149
f85876ba
YZ
3150 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3151 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3152 return -ENOTTY;
d91cdc74 3153
f85876ba
YZ
3154 if (probe)
3155 return 0;
1ca88797 3156
f85876ba
YZ
3157 if (dev->current_state != PCI_D0)
3158 return -EINVAL;
3159
3160 csr &= ~PCI_PM_CTRL_STATE_MASK;
3161 csr |= PCI_D3hot;
3162 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3163 pci_dev_d3_sleep(dev);
f85876ba
YZ
3164
3165 csr &= ~PCI_PM_CTRL_STATE_MASK;
3166 csr |= PCI_D0;
3167 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3168 pci_dev_d3_sleep(dev);
f85876ba
YZ
3169
3170 return 0;
3171}
3172
c12ff1df
YZ
3173static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3174{
3175 u16 ctrl;
3176 struct pci_dev *pdev;
3177
654b75e0 3178 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3179 return -ENOTTY;
3180
3181 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3182 if (pdev != dev)
3183 return -ENOTTY;
3184
3185 if (probe)
3186 return 0;
3187
3188 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3189 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3190 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3191 msleep(100);
3192
3193 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3194 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3195 msleep(100);
3196
3197 return 0;
3198}
3199
977f857c 3200static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3201{
8c1c699f
YZ
3202 int rc;
3203
3204 might_sleep();
3205
b9c3b266
DC
3206 rc = pci_dev_specific_reset(dev, probe);
3207 if (rc != -ENOTTY)
3208 goto done;
3209
8c1c699f
YZ
3210 rc = pcie_flr(dev, probe);
3211 if (rc != -ENOTTY)
3212 goto done;
d91cdc74 3213
8c1c699f 3214 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3215 if (rc != -ENOTTY)
3216 goto done;
3217
3218 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3219 if (rc != -ENOTTY)
3220 goto done;
3221
3222 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3223done:
977f857c
KRW
3224 return rc;
3225}
3226
3227static int pci_dev_reset(struct pci_dev *dev, int probe)
3228{
3229 int rc;
3230
3231 if (!probe) {
3232 pci_cfg_access_lock(dev);
3233 /* block PM suspend, driver probe, etc. */
3234 device_lock(&dev->dev);
3235 }
3236
3237 rc = __pci_dev_reset(dev, probe);
3238
8c1c699f 3239 if (!probe) {
8e9394ce 3240 device_unlock(&dev->dev);
fb51ccbf 3241 pci_cfg_access_unlock(dev);
8c1c699f 3242 }
8c1c699f 3243 return rc;
d91cdc74 3244}
d91cdc74 3245/**
8c1c699f
YZ
3246 * __pci_reset_function - reset a PCI device function
3247 * @dev: PCI device to reset
d91cdc74
SY
3248 *
3249 * Some devices allow an individual function to be reset without affecting
3250 * other functions in the same device. The PCI device must be responsive
3251 * to PCI config space in order to use this function.
3252 *
3253 * The device function is presumed to be unused when this function is called.
3254 * Resetting the device will make the contents of PCI configuration space
3255 * random, so any caller of this must be prepared to reinitialise the
3256 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3257 * etc.
3258 *
8c1c699f 3259 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3260 * device doesn't support resetting a single function.
3261 */
8c1c699f 3262int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3263{
8c1c699f 3264 return pci_dev_reset(dev, 0);
d91cdc74 3265}
8c1c699f 3266EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3267
6fbf9e7a
KRW
3268/**
3269 * __pci_reset_function_locked - reset a PCI device function while holding
3270 * the @dev mutex lock.
3271 * @dev: PCI device to reset
3272 *
3273 * Some devices allow an individual function to be reset without affecting
3274 * other functions in the same device. The PCI device must be responsive
3275 * to PCI config space in order to use this function.
3276 *
3277 * The device function is presumed to be unused and the caller is holding
3278 * the device mutex lock when this function is called.
3279 * Resetting the device will make the contents of PCI configuration space
3280 * random, so any caller of this must be prepared to reinitialise the
3281 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3282 * etc.
3283 *
3284 * Returns 0 if the device function was successfully reset or negative if the
3285 * device doesn't support resetting a single function.
3286 */
3287int __pci_reset_function_locked(struct pci_dev *dev)
3288{
977f857c 3289 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3290}
3291EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3292
711d5779
MT
3293/**
3294 * pci_probe_reset_function - check whether the device can be safely reset
3295 * @dev: PCI device to reset
3296 *
3297 * Some devices allow an individual function to be reset without affecting
3298 * other functions in the same device. The PCI device must be responsive
3299 * to PCI config space in order to use this function.
3300 *
3301 * Returns 0 if the device function can be reset or negative if the
3302 * device doesn't support resetting a single function.
3303 */
3304int pci_probe_reset_function(struct pci_dev *dev)
3305{
3306 return pci_dev_reset(dev, 1);
3307}
3308
8dd7f803 3309/**
8c1c699f
YZ
3310 * pci_reset_function - quiesce and reset a PCI device function
3311 * @dev: PCI device to reset
8dd7f803
SY
3312 *
3313 * Some devices allow an individual function to be reset without affecting
3314 * other functions in the same device. The PCI device must be responsive
3315 * to PCI config space in order to use this function.
3316 *
3317 * This function does not just reset the PCI portion of a device, but
3318 * clears all the state associated with the device. This function differs
8c1c699f 3319 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3320 * over the reset.
3321 *
8c1c699f 3322 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3323 * device doesn't support resetting a single function.
3324 */
3325int pci_reset_function(struct pci_dev *dev)
3326{
8c1c699f 3327 int rc;
8dd7f803 3328
8c1c699f
YZ
3329 rc = pci_dev_reset(dev, 1);
3330 if (rc)
3331 return rc;
8dd7f803 3332
8dd7f803
SY
3333 pci_save_state(dev);
3334
8c1c699f
YZ
3335 /*
3336 * both INTx and MSI are disabled after the Interrupt Disable bit
3337 * is set and the Bus Master bit is cleared.
3338 */
8dd7f803
SY
3339 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3340
8c1c699f 3341 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3342
3343 pci_restore_state(dev);
8dd7f803 3344
8c1c699f 3345 return rc;
8dd7f803
SY
3346}
3347EXPORT_SYMBOL_GPL(pci_reset_function);
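/*
 * Usage sketch, assuming a hypothetical error-recovery path: probe first so
 * that a device without any usable per-function reset is left untouched.
 */
#if 0
static int foo_try_reset(struct pci_dev *pdev)
{
	if (pci_probe_reset_function(pdev))
		return -ENOTTY;

	/* Saves and restores config space around the actual reset */
	return pci_reset_function(pdev);
}
#endif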
3348
d556ad4b
PO
3349/**
3350 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3351 * @dev: PCI device to query
3352 *
3353 * Returns mmrbc: maximum designed memory read count in bytes
3354 * or appropriate error value.
3355 */
3356int pcix_get_max_mmrbc(struct pci_dev *dev)
3357{
7c9e2b1c 3358 int cap;
d556ad4b
PO
3359 u32 stat;
3360
3361 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3362 if (!cap)
3363 return -EINVAL;
3364
7c9e2b1c 3365 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3366 return -EINVAL;
3367
25daeb55 3368 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3369}
3370EXPORT_SYMBOL(pcix_get_max_mmrbc);
3371
3372/**
3373 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3374 * @dev: PCI device to query
3375 *
3376 * Returns mmrbc: maximum memory read count in bytes
3377 * or appropriate error value.
3378 */
3379int pcix_get_mmrbc(struct pci_dev *dev)
3380{
7c9e2b1c 3381 int cap;
bdc2bda7 3382 u16 cmd;
d556ad4b
PO
3383
3384 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3385 if (!cap)
3386 return -EINVAL;
3387
7c9e2b1c
DN
3388 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3389 return -EINVAL;
d556ad4b 3390
7c9e2b1c 3391 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3392}
3393EXPORT_SYMBOL(pcix_get_mmrbc);
3394
3395/**
3396 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3397 * @dev: PCI device to query
3398 * @mmrbc: maximum memory read count in bytes
3399 * valid values are 512, 1024, 2048, 4096
3400 *
3401 * If possible sets maximum memory read byte count; some bridges have errata
3402 * that prevent this.
3403 */
3404int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3405{
7c9e2b1c 3406 int cap;
bdc2bda7
DN
3407 u32 stat, v, o;
3408 u16 cmd;
d556ad4b 3409
229f5afd 3410 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3411 return -EINVAL;
d556ad4b
PO
3412
3413 v = ffs(mmrbc) - 10;
3414
3415 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3416 if (!cap)
7c9e2b1c 3417 return -EINVAL;
d556ad4b 3418
7c9e2b1c
DN
3419 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3420 return -EINVAL;
d556ad4b
PO
3421
3422 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3423 return -E2BIG;
3424
7c9e2b1c
DN
3425 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3426 return -EINVAL;
d556ad4b
PO
3427
3428 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3429 if (o != v) {
809a3bf9 3430 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
d556ad4b
PO
3431 return -EIO;
3432
3433 cmd &= ~PCI_X_CMD_MAX_READ;
3434 cmd |= v << 2;
7c9e2b1c
DN
3435 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3436 return -EIO;
d556ad4b 3437 }
7c9e2b1c 3438 return 0;
d556ad4b
PO
3439}
3440EXPORT_SYMBOL(pcix_set_mmrbc);
3441
3442/**
3443 * pcie_get_readrq - get PCI Express read request size
3444 * @dev: PCI device to query
3445 *
3446 * Returns maximum memory read request in bytes
3447 * or appropriate error value.
3448 */
3449int pcie_get_readrq(struct pci_dev *dev)
3450{
d556ad4b
PO
3451 u16 ctl;
3452
59875ae4 3453 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
d556ad4b 3454
59875ae4 3455 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3456}
3457EXPORT_SYMBOL(pcie_get_readrq);
3458
3459/**
3460 * pcie_set_readrq - set PCI Express maximum memory read request
3461 * @dev: PCI device to query
42e61f4a 3462 * @rq: maximum memory read count in bytes
d556ad4b
PO
3463 * valid values are 128, 256, 512, 1024, 2048, 4096
3464 *
c9b378c7 3465 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3466 */
3467int pcie_set_readrq(struct pci_dev *dev, int rq)
3468{
59875ae4 3469 u16 v;
d556ad4b 3470
229f5afd 3471 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
59875ae4 3472 return -EINVAL;
d556ad4b 3473
a1c473aa
BH
3474 /*
3475 * If using the "performance" PCIe config, we clamp the
3476 * read rq size to the max packet size to prevent the
3477 * host bridge from generating requests larger than we can
3478 * cope with
3479 */
3480 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3481 int mps = pcie_get_mps(dev);
3482
3483 if (mps < 0)
3484 return mps;
3485 if (mps < rq)
3486 rq = mps;
3487 }
3488
3489 v = (ffs(rq) - 8) << 12;
d556ad4b 3490
59875ae4
JL
3491 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3492 PCI_EXP_DEVCTL_READRQ, v);
d556ad4b
PO
3493}
3494EXPORT_SYMBOL(pcie_set_readrq);
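/*
 * Usage sketch (hypothetical policy): cap the read request size for
 * fairness on a shared link; the 512-byte limit is purely illustrative.
 */
#if 0
static void foo_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}
#endif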
3495
b03e7495
JM
3496/**
3497 * pcie_get_mps - get PCI Express maximum payload size
3498 * @dev: PCI device to query
3499 *
3500 * Returns maximum payload size in bytes
3501 * or appropriate error value.
3502 */
3503int pcie_get_mps(struct pci_dev *dev)
3504{
b03e7495
JM
3505 u16 ctl;
3506
59875ae4 3507 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
b03e7495 3508
59875ae4 3509 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
b03e7495
JM
3510}
3511
3512/**
3513 * pcie_set_mps - set PCI Express maximum payload size
3514 * @dev: PCI device to query
47c08f31 3515 * @mps: maximum payload size in bytes
b03e7495
JM
3516 * valid values are 128, 256, 512, 1024, 2048, 4096
3517 *
3518 * If possible sets maximum payload size
3519 */
3520int pcie_set_mps(struct pci_dev *dev, int mps)
3521{
59875ae4 3522 u16 v;
b03e7495
JM
3523
3524 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
59875ae4 3525 return -EINVAL;
b03e7495
JM
3526
3527 v = ffs(mps) - 8;
3528 if (v > dev->pcie_mpss)
59875ae4 3529 return -EINVAL;
b03e7495
JM
3530 v <<= 5;
3531
59875ae4
JL
3532 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
3533 PCI_EXP_DEVCTL_PAYLOAD, v);
b03e7495
JM
3534}
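/*
 * Illustrative sketch (not part of the original file): reading back the
 * current Max_Payload_Size with pcie_get_mps() and the capability ceiling
 * from dev->pcie_mpss.  The encoding used above is bytes = 128 << field,
 * so field 0 -> 128 bytes, 1 -> 256, ... 5 -> 4096.  The helper name is
 * made up for the example.
 */
static void foo_report_mps(struct pci_dev *pdev)
{
	int mps = pcie_get_mps(pdev);		/* current DEVCTL payload setting */
	int mpss = 128 << pdev->pcie_mpss;	/* maximum supported per DEVCAP */

	dev_info(&pdev->dev, "MPS %d bytes (device supports up to %d bytes)\n",
		 mps, mpss);
}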
3535
c87deff7
HS
3536/**
3537 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3538 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3539 * @flags: resource type mask to be selected
3540 *
 3541 * This helper routine makes a BAR mask from the resource type.
3542 */
3543int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3544{
3545 int i, bars = 0;
3546 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3547 if (pci_resource_flags(dev, i) & flags)
3548 bars |= (1 << i);
3549 return bars;
3550}
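/*
 * Illustrative sketch (not part of the original file): the common pairing
 * of pci_select_bars() with pci_request_selected_regions(), as many drivers
 * do at probe time to claim only their memory BARs.  "foo" is a placeholder
 * driver name.
 */
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}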
3551
613e7ed6
YZ
3552/**
3553 * pci_resource_bar - get position of the BAR associated with a resource
3554 * @dev: the PCI device
3555 * @resno: the resource number
3556 * @type: the BAR type to be filled in
3557 *
3558 * Returns BAR position in config space, or 0 if the BAR is invalid.
3559 */
3560int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3561{
d1b054da
YZ
3562 int reg;
3563
613e7ed6
YZ
3564 if (resno < PCI_ROM_RESOURCE) {
3565 *type = pci_bar_unknown;
3566 return PCI_BASE_ADDRESS_0 + 4 * resno;
3567 } else if (resno == PCI_ROM_RESOURCE) {
3568 *type = pci_bar_mem32;
3569 return dev->rom_base_reg;
d1b054da
YZ
3570 } else if (resno < PCI_BRIDGE_RESOURCES) {
3571 /* device specific resource */
3572 reg = pci_iov_resource_bar(dev, resno, type);
3573 if (reg)
3574 return reg;
613e7ed6
YZ
3575 }
3576
865df576 3577 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3578 return 0;
3579}
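/*
 * Illustrative note (not part of the original file): pci_resource_bar() is
 * internal to the PCI core.  For a standard (type 0) header the mapping
 * above is simply BAR n -> PCI_BASE_ADDRESS_0 + 4 * n, i.e. BAR 0 at
 * config offset 0x10, BAR 1 at 0x14, ... BAR 5 at 0x24, with the expansion
 * ROM BAR reported separately through dev->rom_base_reg.
 */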
3580
95a8b6ef
MT
3581/* Some architectures require additional programming to enable VGA */
3582static arch_set_vga_state_t arch_set_vga_state;
3583
3584void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3585{
3586 arch_set_vga_state = func; /* NULL disables */
3587}
3588
3589static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
7ad35cf2 3590 unsigned int command_bits, u32 flags)
95a8b6ef
MT
3591{
3592 if (arch_set_vga_state)
3593 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 3594 flags);
95a8b6ef
MT
3595 return 0;
3596}
3597
deb2d2ec
BH
3598/**
3599 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
3600 * @dev: the PCI device
3601 * @decode: true = enable decoding, false = disable decoding
3602 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 3603 * @flags: operations to perform: PCI_VGA_STATE_CHANGE_DECODES and/or
3448a19d 3604 * PCI_VGA_STATE_CHANGE_BRIDGE (traverse ancestors and change bridges)
deb2d2ec
BH
3605 */
3606int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 3607 unsigned int command_bits, u32 flags)
deb2d2ec
BH
3608{
3609 struct pci_bus *bus;
3610 struct pci_dev *bridge;
3611 u16 cmd;
95a8b6ef 3612 int rc;
deb2d2ec 3613
3448a19d 3614 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 3615
95a8b6ef 3616 /* ARCH specific VGA enables */
3448a19d 3617 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
3618 if (rc)
3619 return rc;
3620
3448a19d
DA
3621 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3622 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3623 if (decode == true)
3624 cmd |= command_bits;
3625 else
3626 cmd &= ~command_bits;
3627 pci_write_config_word(dev, PCI_COMMAND, cmd);
3628 }
deb2d2ec 3629
3448a19d 3630 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
3631 return 0;
3632
3633 bus = dev->bus;
3634 while (bus) {
3635 bridge = bus->self;
3636 if (bridge) {
3637 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3638 &cmd);
3639 if (decode == true)
3640 cmd |= PCI_BRIDGE_CTL_VGA;
3641 else
3642 cmd &= ~PCI_BRIDGE_CTL_VGA;
3643 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3644 cmd);
3645 }
3646 bus = bus->parent;
3647 }
3648 return 0;
3649}
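/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * a VGA arbiter might route legacy VGA decoding to one device, updating both
 * the device's command register and the VGA Enable bit of every upstream
 * bridge.  The helper name is made up for the example.
 */
static int foo_route_vga_to(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}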
3650
32a9a682
YS
3651#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3652static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 3653static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
3654
3655/**
3656 * pci_specified_resource_alignment - get resource alignment specified by user.
 3657 * @dev: the PCI device to check
3658 *
3659 * RETURNS: Resource alignment if it is specified.
3660 * Zero if it is not specified.
3661 */
3662resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3663{
3664 int seg, bus, slot, func, align_order, count;
3665 resource_size_t align = 0;
3666 char *p;
3667
3668 spin_lock(&resource_alignment_lock);
3669 p = resource_alignment_param;
3670 while (*p) {
3671 count = 0;
3672 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3673 p[count] == '@') {
3674 p += count + 1;
3675 } else {
3676 align_order = -1;
3677 }
3678 if (sscanf(p, "%x:%x:%x.%x%n",
3679 &seg, &bus, &slot, &func, &count) != 4) {
3680 seg = 0;
3681 if (sscanf(p, "%x:%x.%x%n",
3682 &bus, &slot, &func, &count) != 3) {
3683 /* Invalid format */
3684 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3685 p);
3686 break;
3687 }
3688 }
3689 p += count;
3690 if (seg == pci_domain_nr(dev->bus) &&
3691 bus == dev->bus->number &&
3692 slot == PCI_SLOT(dev->devfn) &&
3693 func == PCI_FUNC(dev->devfn)) {
3694 if (align_order == -1) {
3695 align = PAGE_SIZE;
3696 } else {
3697 align = 1 << align_order;
3698 }
3699 /* Found */
3700 break;
3701 }
3702 if (*p != ';' && *p != ',') {
3703 /* End of param or invalid format */
3704 break;
3705 }
3706 p++;
3707 }
3708 spin_unlock(&resource_alignment_lock);
3709 return align;
3710}
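/*
 * Illustrative note (not part of the original file): the format parsed above
 * is "[<order>@][<domain>:]<bus>:<slot>.<func>", with <order> in decimal and
 * the address in hex, and multiple entries separated by ';' or ','.  For
 * example, booting with
 *
 *	pci=resource_alignment=20@0000:01:00.0
 *
 * requests 2^20 (1 MiB) alignment for device 0000:01:00.0; omitting
 * "<order>@" falls back to PAGE_SIZE alignment.  The same string can also be
 * written at runtime to the bus attribute created below (typically
 * /sys/bus/pci/resource_alignment).
 */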
3711
3712/**
3713 * pci_is_reassigndev - check if specified PCI is target device to reassign
3714 * @dev: the PCI device to check
3715 *
 3716 * RETURNS: non-zero if the PCI device is a target device to reassign,
 3717 * zero otherwise.
3718 */
3719int pci_is_reassigndev(struct pci_dev *dev)
3720{
3721 return (pci_specified_resource_alignment(dev) != 0);
3722}
3723
2069ecfb
YL
3724/*
3725 * This function disables memory decoding and releases memory resources
 3726 * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
 3727 * It also rounds up the resource size to the specified alignment.
3728 * Later on, the kernel will assign page-aligned memory resource back
3729 * to the device.
3730 */
3731void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3732{
3733 int i;
3734 struct resource *r;
3735 resource_size_t align, size;
3736 u16 command;
3737
3738 if (!pci_is_reassigndev(dev))
3739 return;
3740
3741 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3742 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3743 dev_warn(&dev->dev,
3744 "Can't reassign resources to host bridge.\n");
3745 return;
3746 }
3747
3748 dev_info(&dev->dev,
3749 "Disabling memory decoding and releasing memory resources.\n");
3750 pci_read_config_word(dev, PCI_COMMAND, &command);
3751 command &= ~PCI_COMMAND_MEMORY;
3752 pci_write_config_word(dev, PCI_COMMAND, command);
3753
3754 align = pci_specified_resource_alignment(dev);
3755 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3756 r = &dev->resource[i];
3757 if (!(r->flags & IORESOURCE_MEM))
3758 continue;
3759 size = resource_size(r);
3760 if (size < align) {
3761 size = align;
3762 dev_info(&dev->dev,
3763 "Rounding up size of resource #%d to %#llx.\n",
3764 i, (unsigned long long)size);
3765 }
3766 r->end = size - 1;
3767 r->start = 0;
3768 }
 3769 /* Need to disable the bridge's resource window,
 3770 * so that the kernel can reassign a new resource
 3771 * window later on.
3772 */
3773 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3774 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3775 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3776 r = &dev->resource[i];
3777 if (!(r->flags & IORESOURCE_MEM))
3778 continue;
3779 r->end = resource_size(r) - 1;
3780 r->start = 0;
3781 }
3782 pci_disable_bridge_window(dev);
3783 }
3784}
3785
32a9a682
YS
3786ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3787{
3788 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3789 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3790 spin_lock(&resource_alignment_lock);
3791 strncpy(resource_alignment_param, buf, count);
3792 resource_alignment_param[count] = '\0';
3793 spin_unlock(&resource_alignment_lock);
3794 return count;
3795}
3796
3797ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3798{
3799 size_t count;
3800 spin_lock(&resource_alignment_lock);
3801 count = snprintf(buf, size, "%s", resource_alignment_param);
3802 spin_unlock(&resource_alignment_lock);
3803 return count;
3804}
3805
3806static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3807{
3808 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3809}
3810
3811static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3812 const char *buf, size_t count)
3813{
3814 return pci_set_resource_alignment_param(buf, count);
3815}
3816
3817BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3818 pci_resource_alignment_store);
3819
3820static int __init pci_resource_alignment_sysfs_init(void)
3821{
3822 return bus_create_file(&pci_bus_type,
3823 &bus_attr_resource_alignment);
3824}
3825
3826late_initcall(pci_resource_alignment_sysfs_init);
3827
32a2eea7
JG
3828static void __devinit pci_no_domains(void)
3829{
3830#ifdef CONFIG_PCI_DOMAINS
3831 pci_domains_supported = 0;
3832#endif
3833}
3834
0ef5f8f6
AP
3835/**
 3836 * pci_ext_cfg_avail - can we access extended PCI config space?
3837 * @dev: The PCI device of the root bridge.
3838 *
3839 * Returns 1 if we can access PCI extended config space (offsets
3840 * greater than 0xff). This is the default implementation. Architecture
3841 * implementations can override this.
3842 */
d6d88c83 3843int __weak pci_ext_cfg_avail(struct pci_dev *dev)
0ef5f8f6
AP
3844{
3845 return 1;
3846}
3847
2d1c8618
BH
3848void __weak pci_fixup_cardbus(struct pci_bus *bus)
3849{
3850}
3851EXPORT_SYMBOL(pci_fixup_cardbus);
3852
ad04d31e 3853static int __init pci_setup(char *str)
1da177e4
LT
3854{
3855 while (str) {
3856 char *k = strchr(str, ',');
3857 if (k)
3858 *k++ = 0;
3859 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
3860 if (!strcmp(str, "nomsi")) {
3861 pci_no_msi();
7f785763
RD
3862 } else if (!strcmp(str, "noaer")) {
3863 pci_no_aer();
b55438fd
YL
3864 } else if (!strncmp(str, "realloc=", 8)) {
3865 pci_realloc_get_opt(str + 8);
f483d392 3866 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 3867 pci_realloc_get_opt("on");
32a2eea7
JG
3868 } else if (!strcmp(str, "nodomains")) {
3869 pci_no_domains();
6748dcc2
RW
3870 } else if (!strncmp(str, "noari", 5)) {
3871 pcie_ari_disabled = true;
4516a618
AN
3872 } else if (!strncmp(str, "cbiosize=", 9)) {
3873 pci_cardbus_io_size = memparse(str + 9, &str);
3874 } else if (!strncmp(str, "cbmemsize=", 10)) {
3875 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
3876 } else if (!strncmp(str, "resource_alignment=", 19)) {
3877 pci_set_resource_alignment_param(str + 19,
3878 strlen(str + 19));
43c16408
AP
3879 } else if (!strncmp(str, "ecrc=", 5)) {
3880 pcie_ecrc_get_policy(str + 5);
28760489
EB
3881 } else if (!strncmp(str, "hpiosize=", 9)) {
3882 pci_hotplug_io_size = memparse(str + 9, &str);
3883 } else if (!strncmp(str, "hpmemsize=", 10)) {
3884 pci_hotplug_mem_size = memparse(str + 10, &str);
5f39e670
JM
3885 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3886 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
3887 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3888 pcie_bus_config = PCIE_BUS_SAFE;
3889 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3890 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
3891 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3892 pcie_bus_config = PCIE_BUS_PEER2PEER;
284f5f9d
BH
3893 } else if (!strncmp(str, "pcie_scan_all", 13)) {
3894 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
309e57df
MW
3895 } else {
3896 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3897 str);
3898 }
1da177e4
LT
3899 }
3900 str = k;
3901 }
0637a70a 3902 return 0;
1da177e4 3903}
0637a70a 3904early_param("pci", pci_setup);
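/*
 * Illustrative note (not part of the original file): the options handled by
 * pci_setup() above are comma-separated within a single "pci=" argument, and
 * the size options go through memparse(), so K/M/G suffixes are accepted.
 * For example:
 *
 *	pci=nomsi,cbmemsize=96M,hpiosize=4K,pcie_bus_safe
 *
 * disables MSI, sizes the CardBus memory window at 96 MB and the hotplug I/O
 * window at 4 KB, and selects the "safe" PCIe MPS configuration.
 */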
1da177e4 3905
0b62e13b 3906EXPORT_SYMBOL(pci_reenable_device);
b718989d
BH
3907EXPORT_SYMBOL(pci_enable_device_io);
3908EXPORT_SYMBOL(pci_enable_device_mem);
1da177e4 3909EXPORT_SYMBOL(pci_enable_device);
9ac7849e
TH
3910EXPORT_SYMBOL(pcim_enable_device);
3911EXPORT_SYMBOL(pcim_pin_device);
1da177e4 3912EXPORT_SYMBOL(pci_disable_device);
1da177e4
LT
3913EXPORT_SYMBOL(pci_find_capability);
3914EXPORT_SYMBOL(pci_bus_find_capability);
3915EXPORT_SYMBOL(pci_release_regions);
3916EXPORT_SYMBOL(pci_request_regions);
e8de1481 3917EXPORT_SYMBOL(pci_request_regions_exclusive);
1da177e4
LT
3918EXPORT_SYMBOL(pci_release_region);
3919EXPORT_SYMBOL(pci_request_region);
e8de1481 3920EXPORT_SYMBOL(pci_request_region_exclusive);
c87deff7
HS
3921EXPORT_SYMBOL(pci_release_selected_regions);
3922EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 3923EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
1da177e4 3924EXPORT_SYMBOL(pci_set_master);
6a479079 3925EXPORT_SYMBOL(pci_clear_master);
1da177e4 3926EXPORT_SYMBOL(pci_set_mwi);
694625c0 3927EXPORT_SYMBOL(pci_try_set_mwi);
1da177e4 3928EXPORT_SYMBOL(pci_clear_mwi);
a04ce0ff 3929EXPORT_SYMBOL_GPL(pci_intx);
1da177e4
LT
3930EXPORT_SYMBOL(pci_assign_resource);
3931EXPORT_SYMBOL(pci_find_parent_resource);
c87deff7 3932EXPORT_SYMBOL(pci_select_bars);
1da177e4
LT
3933
3934EXPORT_SYMBOL(pci_set_power_state);
3935EXPORT_SYMBOL(pci_save_state);
3936EXPORT_SYMBOL(pci_restore_state);
e5899e1b 3937EXPORT_SYMBOL(pci_pme_capable);
5a6c9b60 3938EXPORT_SYMBOL(pci_pme_active);
0235c4fc 3939EXPORT_SYMBOL(pci_wake_from_d3);
e5899e1b 3940EXPORT_SYMBOL(pci_target_state);
404cc2d8
RW
3941EXPORT_SYMBOL(pci_prepare_to_sleep);
3942EXPORT_SYMBOL(pci_back_from_sleep);
f7bdd12d 3943EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);