drivers/pci/pci.c - git blame (Linux 3.4-rc4, mirror_ubuntu-artful-kernel.git)
1da177e4 1/*
1da177e4
LT
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
075c1771 14#include <linux/pm.h>
5a0e3ad6 15#include <linux/slab.h>
1da177e4
LT
16#include <linux/module.h>
17#include <linux/spinlock.h>
4e57b681 18#include <linux/string.h>
229f5afd 19#include <linux/log2.h>
7d715a6c 20#include <linux/pci-aspm.h>
c300bd2f 21#include <linux/pm_wakeup.h>
8dd7f803 22#include <linux/interrupt.h>
32a9a682 23#include <linux/device.h>
b67ea761 24#include <linux/pm_runtime.h>
32a9a682 25#include <asm/setup.h>
bc56b9e0 26#include "pci.h"
1da177e4 27
00240c38
AS
28const char *pci_power_names[] = {
29 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
30};
31EXPORT_SYMBOL_GPL(pci_power_names);
32
93177a74
RW
33int isa_dma_bridge_buggy;
34EXPORT_SYMBOL(isa_dma_bridge_buggy);
35
36int pci_pci_problems;
37EXPORT_SYMBOL(pci_pci_problems);
38
1ae861e6
RW
39unsigned int pci_pm_d3_delay;
40
df17e62e
MG
41static void pci_pme_list_scan(struct work_struct *work);
42
43static LIST_HEAD(pci_pme_list);
44static DEFINE_MUTEX(pci_pme_list_mutex);
45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
46
47struct pci_pme_device {
48 struct list_head list;
49 struct pci_dev *dev;
50};
51
52#define PME_TIMEOUT 1000 /* How long between PME checks */
53
1ae861e6
RW
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
1da177e4 63
32a2eea7
JG
64#ifdef CONFIG_PCI_DOMAINS
65int pci_domains_supported = 1;
66#endif
67
4516a618
AN
68#define DEFAULT_CARDBUS_IO_SIZE (256)
69#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
73
28760489
EB
74#define DEFAULT_HOTPLUG_IO_SIZE (256)
75#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79
5f39e670 80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495 81
ac1aa47b
JB
82/*
83 * The default CLS is used if arch didn't set CLS explicitly and not
84 * all pci devices agree on the same value. Arch can override either
85 * the dfl or actual value as it sees fit. Don't forget this is
86 * measured in 32-bit words, not bytes.
87 */
98e724c7 88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
ac1aa47b
JB
89u8 pci_cache_line_size;
90
96c55900
MS
91/*
92 * If we set up a device for bus mastering, we need to check the latency
93 * timer as certain BIOSes forget to set it properly.
94 */
95unsigned int pcibios_max_latency = 255;
96
6748dcc2
RW
97/* If set, the PCIe ARI capability will not be used. */
98static bool pcie_ari_disabled;
99
1da177e4
LT
100/**
101 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
102 * @bus: pointer to PCI bus structure to search
103 *
104 * Given a PCI bus, returns the highest PCI bus number present in the set
105 * including the given PCI bus and its list of child PCI buses.
106 */
96bde06a 107unsigned char pci_bus_max_busnr(struct pci_bus* bus)
1da177e4
LT
108{
109 struct list_head *tmp;
110 unsigned char max, n;
111
b82db5ce 112 max = bus->subordinate;
1da177e4
LT
113 list_for_each(tmp, &bus->children) {
114 n = pci_bus_max_busnr(pci_bus_b(tmp));
115 if (n > max)
116 max = n;
117 }
118 return max;
119}
b82db5ce 120EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
1da177e4 121
1684f5dd
AM
122#ifdef CONFIG_HAS_IOMEM
123void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
124{
125 /*
126 * Make sure the BAR is actually a memory resource, not an IO resource
127 */
128 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
129 WARN_ON(1);
130 return NULL;
131 }
132 return ioremap_nocache(pci_resource_start(pdev, bar),
133 pci_resource_len(pdev, bar));
134}
135EXPORT_SYMBOL_GPL(pci_ioremap_bar);
136#endif
137
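/*
 * Editor's example (not part of the original file): a minimal sketch of
 * how a driver might use pci_ioremap_bar() from its probe path.  The
 * function name and BAR index are illustrative, not kernel APIs.
 */
static int example_map_bar0(struct pci_dev *pdev)
{
        void __iomem *regs = pci_ioremap_bar(pdev, 0);

        if (!regs)
                return -ENOMEM;
        /* ... access device registers with readl()/writel() ... */
        iounmap(regs);
        return 0;
}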
b82db5ce 138#if 0
1da177e4
LT
139/**
140 * pci_max_busnr - returns maximum PCI bus number
141 *
142 * Returns the highest PCI bus number present in the system global list of
143 * PCI buses.
144 */
145unsigned char __devinit
146pci_max_busnr(void)
147{
148 struct pci_bus *bus = NULL;
149 unsigned char max, n;
150
151 max = 0;
152 while ((bus = pci_find_next_bus(bus)) != NULL) {
153 n = pci_bus_max_busnr(bus);
154 if (n > max)
155 max = n;
156 }
157 return max;
158}
159
54c762fe
AB
160#endif /* 0 */
161
687d5fe3
ME
162#define PCI_FIND_CAP_TTL 48
163
164static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
165 u8 pos, int cap, int *ttl)
24a4e377
RD
166{
167 u8 id;
24a4e377 168
687d5fe3 169 while ((*ttl)--) {
24a4e377
RD
170 pci_bus_read_config_byte(bus, devfn, pos, &pos);
171 if (pos < 0x40)
172 break;
173 pos &= ~3;
174 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
175 &id);
176 if (id == 0xff)
177 break;
178 if (id == cap)
179 return pos;
180 pos += PCI_CAP_LIST_NEXT;
181 }
182 return 0;
183}
184
687d5fe3
ME
185static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
186 u8 pos, int cap)
187{
188 int ttl = PCI_FIND_CAP_TTL;
189
190 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
191}
192
24a4e377
RD
193int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
194{
195 return __pci_find_next_cap(dev->bus, dev->devfn,
196 pos + PCI_CAP_LIST_NEXT, cap);
197}
198EXPORT_SYMBOL_GPL(pci_find_next_capability);
199
d3bac118
ME
200static int __pci_bus_find_cap_start(struct pci_bus *bus,
201 unsigned int devfn, u8 hdr_type)
1da177e4
LT
202{
203 u16 status;
1da177e4
LT
204
205 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
206 if (!(status & PCI_STATUS_CAP_LIST))
207 return 0;
208
209 switch (hdr_type) {
210 case PCI_HEADER_TYPE_NORMAL:
211 case PCI_HEADER_TYPE_BRIDGE:
d3bac118 212 return PCI_CAPABILITY_LIST;
1da177e4 213 case PCI_HEADER_TYPE_CARDBUS:
d3bac118 214 return PCI_CB_CAPABILITY_LIST;
1da177e4
LT
215 default:
216 return 0;
217 }
d3bac118
ME
218
219 return 0;
1da177e4
LT
220}
221
222/**
223 * pci_find_capability - query for devices' capabilities
224 * @dev: PCI device to query
225 * @cap: capability code
226 *
227 * Tell if a device supports a given PCI capability.
228 * Returns the address of the requested capability structure within the
229 * device's PCI configuration space or 0 in case the device does not
230 * support it. Possible values for @cap:
231 *
232 * %PCI_CAP_ID_PM Power Management
233 * %PCI_CAP_ID_AGP Accelerated Graphics Port
234 * %PCI_CAP_ID_VPD Vital Product Data
235 * %PCI_CAP_ID_SLOTID Slot Identification
236 * %PCI_CAP_ID_MSI Message Signalled Interrupts
237 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
238 * %PCI_CAP_ID_PCIX PCI-X
239 * %PCI_CAP_ID_EXP PCI Express
240 */
241int pci_find_capability(struct pci_dev *dev, int cap)
242{
d3bac118
ME
243 int pos;
244
245 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
246 if (pos)
247 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
248
249 return pos;
1da177e4
LT
250}
251
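/*
 * Editor's example (not part of the original file): locating the PM
 * capability with pci_find_capability() and reading its PMC register,
 * much as pci_pm_init() below does.  The function name is illustrative.
 */
static void example_dump_pm_cap(struct pci_dev *pdev)
{
        int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
        u16 pmc;

        if (!pos)
                return;         /* device has no PM capability */
        pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
        dev_info(&pdev->dev, "PM capability at %#x, version %u\n",
                 pos, pmc & PCI_PM_CAP_VER_MASK);
}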
252/**
253 * pci_bus_find_capability - query for devices' capabilities
254 * @bus: the PCI bus to query
255 * @devfn: PCI device to query
256 * @cap: capability code
257 *
258 * Like pci_find_capability() but works for pci devices that do not have a
259 * pci_dev structure set up yet.
260 *
261 * Returns the address of the requested capability structure within the
262 * device's PCI configuration space or 0 in case the device does not
263 * support it.
264 */
265int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
266{
d3bac118 267 int pos;
1da177e4
LT
268 u8 hdr_type;
269
270 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
271
d3bac118
ME
272 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
273 if (pos)
274 pos = __pci_find_next_cap(bus, devfn, pos, cap);
275
276 return pos;
1da177e4
LT
277}
278
279/**
280 * pci_find_ext_capability - Find an extended capability
281 * @dev: PCI device to query
282 * @cap: capability code
283 *
284 * Returns the address of the requested extended capability structure
285 * within the device's PCI configuration space or 0 if the device does
286 * not support it. Possible values for @cap:
287 *
288 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
289 * %PCI_EXT_CAP_ID_VC Virtual Channel
290 * %PCI_EXT_CAP_ID_DSN Device Serial Number
291 * %PCI_EXT_CAP_ID_PWR Power Budgeting
292 */
293int pci_find_ext_capability(struct pci_dev *dev, int cap)
294{
295 u32 header;
557848c3
ZY
296 int ttl;
297 int pos = PCI_CFG_SPACE_SIZE;
1da177e4 298
557848c3
ZY
299 /* minimum 8 bytes per capability */
300 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
301
302 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
1da177e4
LT
303 return 0;
304
305 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
306 return 0;
307
308 /*
309 * If we have no capabilities, this is indicated by cap ID,
310 * cap version and next pointer all being 0.
311 */
312 if (header == 0)
313 return 0;
314
315 while (ttl-- > 0) {
316 if (PCI_EXT_CAP_ID(header) == cap)
317 return pos;
318
319 pos = PCI_EXT_CAP_NEXT(header);
557848c3 320 if (pos < PCI_CFG_SPACE_SIZE)
1da177e4
LT
321 break;
322
323 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
324 break;
325 }
326
327 return 0;
328}
3a720d72 329EXPORT_SYMBOL_GPL(pci_find_ext_capability);
1da177e4 330
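/*
 * Editor's example (not part of the original file): a one-line test for
 * an extended capability, here Advanced Error Reporting.
 */
static bool example_has_aer(struct pci_dev *pdev)
{
        return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}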
cf4c43dd
JB
331/**
332 * pci_bus_find_ext_capability - find an extended capability
333 * @bus: the PCI bus to query
334 * @devfn: PCI device to query
335 * @cap: capability code
336 *
337 * Like pci_find_ext_capability() but works for pci devices that do not have a
338 * pci_dev structure set up yet.
339 *
340 * Returns the address of the requested capability structure within the
341 * device's PCI configuration space or 0 in case the device does not
342 * support it.
343 */
344int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
345 int cap)
346{
347 u32 header;
348 int ttl;
349 int pos = PCI_CFG_SPACE_SIZE;
350
351 /* minimum 8 bytes per capability */
352 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
353
354 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
355 return 0;
356 if (header == 0xffffffff || header == 0)
357 return 0;
358
359 while (ttl-- > 0) {
360 if (PCI_EXT_CAP_ID(header) == cap)
361 return pos;
362
363 pos = PCI_EXT_CAP_NEXT(header);
364 if (pos < PCI_CFG_SPACE_SIZE)
365 break;
366
367 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
368 break;
369 }
370
371 return 0;
372}
373
687d5fe3
ME
374static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
375{
376 int rc, ttl = PCI_FIND_CAP_TTL;
377 u8 cap, mask;
378
379 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
380 mask = HT_3BIT_CAP_MASK;
381 else
382 mask = HT_5BIT_CAP_MASK;
383
384 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
385 PCI_CAP_ID_HT, &ttl);
386 while (pos) {
387 rc = pci_read_config_byte(dev, pos + 3, &cap);
388 if (rc != PCIBIOS_SUCCESSFUL)
389 return 0;
390
391 if ((cap & mask) == ht_cap)
392 return pos;
393
47a4d5be
BG
394 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
395 pos + PCI_CAP_LIST_NEXT,
687d5fe3
ME
396 PCI_CAP_ID_HT, &ttl);
397 }
398
399 return 0;
400}
401/**
402 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
403 * @dev: PCI device to query
404 * @pos: Position from which to continue searching
405 * @ht_cap: Hypertransport capability code
406 *
407 * To be used in conjunction with pci_find_ht_capability() to search for
408 * all capabilities matching @ht_cap. @pos should always be a value returned
409 * from pci_find_ht_capability().
410 *
411 * NB. To be 100% safe against broken PCI devices, the caller should take
412 * steps to avoid an infinite loop.
413 */
414int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
415{
416 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
417}
418EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
419
420/**
421 * pci_find_ht_capability - query a device's Hypertransport capabilities
422 * @dev: PCI device to query
423 * @ht_cap: Hypertransport capability code
424 *
425 * Tell if a device supports a given Hypertransport capability.
426 * Returns an address within the device's PCI configuration space
427 * or 0 in case the device does not support the requested capability.
428 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
429 * which has a Hypertransport capability matching @ht_cap.
430 */
431int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
432{
433 int pos;
434
435 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
436 if (pos)
437 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
438
439 return pos;
440}
441EXPORT_SYMBOL_GPL(pci_find_ht_capability);
442
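/*
 * Editor's example (not part of the original file): walking every
 * matching Hypertransport capability.  A TTL counter bounds the loop,
 * per the NB above about broken devices.
 */
static void example_walk_ht_caps(struct pci_dev *pdev, int ht_cap)
{
        int pos = pci_find_ht_capability(pdev, ht_cap);
        int ttl = PCI_FIND_CAP_TTL;

        while (pos && ttl--) {
                dev_info(&pdev->dev, "HT cap %#x at %#x\n", ht_cap, pos);
                pos = pci_find_next_ht_capability(pdev, pos, ht_cap);
        }
}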
1da177e4
LT
443/**
444 * pci_find_parent_resource - return resource region of parent bus of given region
445 * @dev: PCI device structure contains resources to be searched
446 * @res: child resource record for which parent is sought
447 *
448 * For given resource region of given device, return the resource
449 * region of parent bus the given region is contained in or where
450 * it should be allocated from.
451 */
452struct resource *
453pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
454{
455 const struct pci_bus *bus = dev->bus;
456 int i;
89a74ecc 457 struct resource *best = NULL, *r;
1da177e4 458
89a74ecc 459 pci_bus_for_each_resource(bus, r, i) {
1da177e4
LT
460 if (!r)
461 continue;
462 if (res->start && !(res->start >= r->start && res->end <= r->end))
463 continue; /* Not contained */
464 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
465 continue; /* Wrong type */
466 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
467 return r; /* Exact match */
8c8def26
LT
468 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
469 if (r->flags & IORESOURCE_PREFETCH)
470 continue;
471 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
472 if (!best)
473 best = r;
1da177e4
LT
474 }
475 return best;
476}
477
064b53db
JL
478/**
479 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
480 * @dev: PCI device to have its BARs restored
481 *
482 * Restore the BAR values for a given device, so as to make it
483 * accessible by its driver.
484 */
ad668599 485static void
064b53db
JL
486pci_restore_bars(struct pci_dev *dev)
487{
bc5f5a82 488 int i;
064b53db 489
bc5f5a82 490 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
14add80b 491 pci_update_resource(dev, i);
064b53db
JL
492}
493
961d9120
RW
494static struct pci_platform_pm_ops *pci_platform_pm;
495
496int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
497{
eb9d0fe4
RW
498 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
499 || !ops->sleep_wake || !ops->can_wakeup)
961d9120
RW
500 return -EINVAL;
501 pci_platform_pm = ops;
502 return 0;
503}
504
505static inline bool platform_pci_power_manageable(struct pci_dev *dev)
506{
507 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
508}
509
510static inline int platform_pci_set_power_state(struct pci_dev *dev,
511 pci_power_t t)
512{
513 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
514}
515
516static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
517{
518 return pci_platform_pm ?
519 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
520}
8f7020d3 521
eb9d0fe4
RW
522static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
523{
524 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
525}
526
527static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
528{
529 return pci_platform_pm ?
530 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
531}
532
b67ea761
RW
533static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
534{
535 return pci_platform_pm ?
536 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
537}
538
1da177e4 539/**
44e4e66e
RW
540 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
541 * a given PCI device
542 * @dev: PCI device to handle.
44e4e66e 543 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1da177e4 544 *
44e4e66e
RW
545 * RETURN VALUE:
546 * -EINVAL if the requested state is invalid.
547 * -EIO if device does not support PCI PM or its PM capabilities register has a
548 * wrong version, or device doesn't support the requested state.
549 * 0 if device already is in the requested state.
550 * 0 if device's power state has been successfully changed.
1da177e4 551 */
f00a20ef 552static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1da177e4 553{
337001b6 554 u16 pmcsr;
44e4e66e 555 bool need_restore = false;
1da177e4 556
4a865905
RW
557 /* Check if we're already there */
558 if (dev->current_state == state)
559 return 0;
560
337001b6 561 if (!dev->pm_cap)
cca03dec
AL
562 return -EIO;
563
44e4e66e
RW
564 if (state < PCI_D0 || state > PCI_D3hot)
565 return -EINVAL;
566
1da177e4
LT
567 /* Validate current state:
568 * Can enter D0 from any state, but we can only go deeper
569 * to sleep if we're already in a low power state
570 */
4a865905 571 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
44e4e66e 572 && dev->current_state > state) {
80ccba11
BH
573 dev_err(&dev->dev, "invalid power transition "
574 "(from state %d to %d)\n", dev->current_state, state);
1da177e4 575 return -EINVAL;
44e4e66e 576 }
1da177e4 577
1da177e4 578 /* check if this device supports the desired state */
337001b6
RW
579 if ((state == PCI_D1 && !dev->d1_support)
580 || (state == PCI_D2 && !dev->d2_support))
3fe9d19f 581 return -EIO;
1da177e4 582
337001b6 583 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
064b53db 584
32a36585 585 /* If we're (effectively) in D3, force entire word to 0.
1da177e4
LT
586 * This doesn't affect PME_Status, disables PME_En, and
587 * sets PowerState to 0.
588 */
32a36585 589 switch (dev->current_state) {
d3535fbb
JL
590 case PCI_D0:
591 case PCI_D1:
592 case PCI_D2:
593 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
594 pmcsr |= state;
595 break;
f62795f1
RW
596 case PCI_D3hot:
597 case PCI_D3cold:
32a36585
JL
598 case PCI_UNKNOWN: /* Boot-up */
599 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
f00a20ef 600 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
44e4e66e 601 need_restore = true;
32a36585 602 /* Fall-through: force to D0 */
32a36585 603 default:
d3535fbb 604 pmcsr = 0;
32a36585 605 break;
1da177e4
LT
606 }
607
608 /* enter specified state */
337001b6 609 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1da177e4
LT
610
611 /* Mandatory power management transition delays */
612 /* see PCI PM 1.1 5.6.1 table 18 */
613 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1ae861e6 614 pci_dev_d3_sleep(dev);
1da177e4 615 else if (state == PCI_D2 || dev->current_state == PCI_D2)
aa8c6c93 616 udelay(PCI_PM_D2_DELAY);
1da177e4 617
e13cdbd7
RW
618 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
619 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
620 if (dev->current_state != state && printk_ratelimit())
621 dev_info(&dev->dev, "Refused to change power state, "
622 "currently in D%d\n", dev->current_state);
064b53db
JL
623
624 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
625 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
626 * from D3hot to D0 _may_ perform an internal reset, thereby
627 * going to "D0 Uninitialized" rather than "D0 Initialized".
628 * For example, at least some versions of the 3c905B and the
629 * 3c556B exhibit this behaviour.
630 *
631 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
632 * devices in a D3hot state at boot. Consequently, we need to
633 * restore at least the BARs so that the device will be
634 * accessible to its driver.
635 */
636 if (need_restore)
637 pci_restore_bars(dev);
638
f00a20ef 639 if (dev->bus->self)
7d715a6c
SL
640 pcie_aspm_pm_state_change(dev->bus->self);
641
1da177e4
LT
642 return 0;
643}
644
44e4e66e
RW
645/**
646 * pci_update_current_state - Read PCI power state of given device from its
647 * PCI PM registers and cache it
648 * @dev: PCI device to handle.
f06fc0b6 649 * @state: State to cache in case the device doesn't have the PM capability
44e4e66e 650 */
73410429 651void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
44e4e66e 652{
337001b6 653 if (dev->pm_cap) {
44e4e66e
RW
654 u16 pmcsr;
655
337001b6 656 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
44e4e66e 657 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
f06fc0b6
RW
658 } else {
659 dev->current_state = state;
44e4e66e
RW
660 }
661}
662
0e5dd46b
RW
663/**
664 * pci_platform_power_transition - Use platform to change device power state
665 * @dev: PCI device to handle.
666 * @state: State to put the device into.
667 */
668static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
669{
670 int error;
671
672 if (platform_pci_power_manageable(dev)) {
673 error = platform_pci_set_power_state(dev, state);
674 if (!error)
675 pci_update_current_state(dev, state);
b51306c6
AH
676 /* Fall back to PCI_D0 if native PM is not supported */
677 if (!dev->pm_cap)
678 dev->current_state = PCI_D0;
0e5dd46b
RW
679 } else {
680 error = -ENODEV;
681 /* Fall back to PCI_D0 if native PM is not supported */
b3bad72e
RW
682 if (!dev->pm_cap)
683 dev->current_state = PCI_D0;
0e5dd46b
RW
684 }
685
686 return error;
687}
688
689/**
690 * __pci_start_power_transition - Start power transition of a PCI device
691 * @dev: PCI device to handle.
692 * @state: State to put the device into.
693 */
694static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
695{
696 if (state == PCI_D0)
697 pci_platform_power_transition(dev, PCI_D0);
698}
699
700/**
701 * __pci_complete_power_transition - Complete power transition of a PCI device
702 * @dev: PCI device to handle.
703 * @state: State to put the device into.
704 *
705 * This function should not be called directly by device drivers.
706 */
707int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
708{
cc2893b6 709 return state >= PCI_D0 ?
0e5dd46b
RW
710 pci_platform_power_transition(dev, state) : -EINVAL;
711}
712EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
713
44e4e66e
RW
714/**
715 * pci_set_power_state - Set the power state of a PCI device
716 * @dev: PCI device to handle.
717 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
718 *
877d0310 719 * Transition a device to a new power state, using the platform firmware and/or
44e4e66e
RW
720 * the device's PCI PM registers.
721 *
722 * RETURN VALUE:
723 * -EINVAL if the requested state is invalid.
724 * -EIO if device does not support PCI PM or its PM capabilities register has a
725 * wrong version, or device doesn't support the requested state.
726 * 0 if device already is in the requested state.
727 * 0 if device's power state has been successfully changed.
728 */
729int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
730{
337001b6 731 int error;
44e4e66e
RW
732
733 /* bound the state we're entering */
734 if (state > PCI_D3hot)
735 state = PCI_D3hot;
736 else if (state < PCI_D0)
737 state = PCI_D0;
738 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
739 /*
740 * If the device or the parent bridge do not support PCI PM,
741 * ignore the request if we're doing anything other than putting
742 * it into D0 (which would only happen on boot).
743 */
744 return 0;
745
0e5dd46b
RW
746 __pci_start_power_transition(dev, state);
747
979b1791
AC
748 /* This device is quirked not to be put into D3, so
749 don't put it in D3 */
750 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
751 return 0;
44e4e66e 752
f00a20ef 753 error = pci_raw_set_power_state(dev, state);
44e4e66e 754
0e5dd46b
RW
755 if (!__pci_complete_power_transition(dev, state))
756 error = 0;
1a680b7c
NC
757 /*
758 * When aspm_policy is "powersave" this call ensures
759 * that ASPM is configured.
760 */
761 if (!error && dev->bus->self)
762 pcie_aspm_powersave_config_link(dev->bus->self);
44e4e66e
RW
763
764 return error;
765}
766
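/*
 * Editor's example (not part of the original file): a driver quiescing
 * its device and dropping it to D3hot, e.g. from a shutdown path.  The
 * error handling is illustrative only.
 */
static void example_power_down(struct pci_dev *pdev)
{
        int err = pci_set_power_state(pdev, PCI_D3hot);

        if (err)
                dev_warn(&pdev->dev, "failed to enter D3hot: %d\n", err);
}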
1da177e4
LT
767/**
768 * pci_choose_state - Choose the power state of a PCI device
769 * @dev: PCI device to be suspended
770 * @state: target sleep state for the whole system. This is the value
771 * that is passed to suspend() function.
772 *
773 * Returns PCI power state suitable for given device and given system
774 * message.
775 */
776
777pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
778{
ab826ca4 779 pci_power_t ret;
0f64474b 780
1da177e4
LT
781 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
782 return PCI_D0;
783
961d9120
RW
784 ret = platform_pci_choose_state(dev);
785 if (ret != PCI_POWER_ERROR)
786 return ret;
ca078bae
PM
787
788 switch (state.event) {
789 case PM_EVENT_ON:
790 return PCI_D0;
791 case PM_EVENT_FREEZE:
b887d2e6
DB
792 case PM_EVENT_PRETHAW:
793 /* REVISIT both freeze and pre-thaw "should" use D0 */
ca078bae 794 case PM_EVENT_SUSPEND:
3a2d5b70 795 case PM_EVENT_HIBERNATE:
ca078bae 796 return PCI_D3hot;
1da177e4 797 default:
80ccba11
BH
798 dev_info(&dev->dev, "unrecognized suspend event %d\n",
799 state.event);
1da177e4
LT
800 BUG();
801 }
802 return PCI_D0;
803}
804
805EXPORT_SYMBOL(pci_choose_state);
806
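/*
 * Editor's example (not part of the original file): the classic legacy
 * .suspend pattern combining pci_save_state() with pci_choose_state(),
 * as described in Documentation/power/pci.txt.
 */
static int example_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_disable_device(pdev);
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}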
89858517
YZ
807#define PCI_EXP_SAVE_REGS 7
808
1b6b8ce2
YZ
809#define pcie_cap_has_devctl(type, flags) 1
810#define pcie_cap_has_lnkctl(type, flags) \
811 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
812 (type == PCI_EXP_TYPE_ROOT_PORT || \
813 type == PCI_EXP_TYPE_ENDPOINT || \
814 type == PCI_EXP_TYPE_LEG_END))
815#define pcie_cap_has_sltctl(type, flags) \
816 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
817 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
818 (type == PCI_EXP_TYPE_DOWNSTREAM && \
819 (flags & PCI_EXP_FLAGS_SLOT))))
820#define pcie_cap_has_rtctl(type, flags) \
821 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
822 (type == PCI_EXP_TYPE_ROOT_PORT || \
823 type == PCI_EXP_TYPE_RC_EC))
824#define pcie_cap_has_devctl2(type, flags) \
825 ((flags & PCI_EXP_FLAGS_VERS) > 1)
826#define pcie_cap_has_lnkctl2(type, flags) \
827 ((flags & PCI_EXP_FLAGS_VERS) > 1)
828#define pcie_cap_has_sltctl2(type, flags) \
829 ((flags & PCI_EXP_FLAGS_VERS) > 1)
830
34a4876e
YL
831static struct pci_cap_saved_state *pci_find_saved_cap(
832 struct pci_dev *pci_dev, char cap)
833{
834 struct pci_cap_saved_state *tmp;
835 struct hlist_node *pos;
836
837 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
838 if (tmp->cap.cap_nr == cap)
839 return tmp;
840 }
841 return NULL;
842}
843
b56a5a23
MT
844static int pci_save_pcie_state(struct pci_dev *dev)
845{
846 int pos, i = 0;
847 struct pci_cap_saved_state *save_state;
848 u16 *cap;
1b6b8ce2 849 u16 flags;
b56a5a23 850
06a1cbaf
KK
851 pos = pci_pcie_cap(dev);
852 if (!pos)
b56a5a23
MT
853 return 0;
854
9f35575d 855 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
b56a5a23 856 if (!save_state) {
e496b617 857 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
b56a5a23
MT
858 return -ENOMEM;
859 }
24a4742f 860 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 861
1b6b8ce2
YZ
862 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
863
864 if (pcie_cap_has_devctl(dev->pcie_type, flags))
865 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
866 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
867 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
868 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
869 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
870 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
871 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
872 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
873 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
874 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
875 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
876 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
877 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
63f4898a 878
b56a5a23
MT
879 return 0;
880}
881
882static void pci_restore_pcie_state(struct pci_dev *dev)
883{
884 int i = 0, pos;
885 struct pci_cap_saved_state *save_state;
886 u16 *cap;
1b6b8ce2 887 u16 flags;
b56a5a23
MT
888
889 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
890 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
891 if (!save_state || pos <= 0)
892 return;
24a4742f 893 cap = (u16 *)&save_state->cap.data[0];
b56a5a23 894
1b6b8ce2
YZ
895 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
896
897 if (pcie_cap_has_devctl(dev->pcie_type, flags))
898 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
899 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
900 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
901 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
902 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
903 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
904 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
905 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
906 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
907 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
908 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
909 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
910 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
b56a5a23
MT
911}
912
cc692a5f
SH
913
914static int pci_save_pcix_state(struct pci_dev *dev)
915{
63f4898a 916 int pos;
cc692a5f 917 struct pci_cap_saved_state *save_state;
cc692a5f
SH
918
919 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
920 if (pos <= 0)
921 return 0;
922
f34303de 923 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
cc692a5f 924 if (!save_state) {
e496b617 925 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
cc692a5f
SH
926 return -ENOMEM;
927 }
cc692a5f 928
24a4742f
AW
929 pci_read_config_word(dev, pos + PCI_X_CMD,
930 (u16 *)save_state->cap.data);
63f4898a 931
cc692a5f
SH
932 return 0;
933}
934
935static void pci_restore_pcix_state(struct pci_dev *dev)
936{
937 int i = 0, pos;
938 struct pci_cap_saved_state *save_state;
939 u16 *cap;
940
941 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
942 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
943 if (!save_state || pos <= 0)
944 return;
24a4742f 945 cap = (u16 *)&save_state->cap.data[0];
cc692a5f
SH
946
947 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
cc692a5f
SH
948}
949
950
1da177e4
LT
951/**
952 * pci_save_state - save the PCI configuration space of a device before suspending
953 * @dev: PCI device that we're dealing with
1da177e4
LT
954 */
955int
956pci_save_state(struct pci_dev *dev)
957{
958 int i;
959 /* XXX: 100% dword access ok here? */
960 for (i = 0; i < 16; i++)
9e0b5b2c 961 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
aa8c6c93 962 dev->state_saved = true;
b56a5a23
MT
963 if ((i = pci_save_pcie_state(dev)) != 0)
964 return i;
cc692a5f
SH
965 if ((i = pci_save_pcix_state(dev)) != 0)
966 return i;
1da177e4
LT
967 return 0;
968}
969
ebfc5b80
RW
970static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
971 u32 saved_val, int retry)
972{
973 u32 val;
974
975 pci_read_config_dword(pdev, offset, &val);
976 if (val == saved_val)
977 return;
978
979 for (;;) {
980 dev_dbg(&pdev->dev, "restoring config space at offset "
981 "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
982 pci_write_config_dword(pdev, offset, saved_val);
983 if (retry-- <= 0)
984 return;
985
986 pci_read_config_dword(pdev, offset, &val);
987 if (val == saved_val)
988 return;
989
990 mdelay(1);
991 }
992}
993
a6cb9ee7
RW
994static void pci_restore_config_space_range(struct pci_dev *pdev,
995 int start, int end, int retry)
ebfc5b80
RW
996{
997 int index;
998
999 for (index = end; index >= start; index--)
1000 pci_restore_config_dword(pdev, 4 * index,
1001 pdev->saved_config_space[index],
1002 retry);
1003}
1004
a6cb9ee7
RW
1005static void pci_restore_config_space(struct pci_dev *pdev)
1006{
1007 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1008 pci_restore_config_space_range(pdev, 10, 15, 0);
1009 /* Restore BARs before the command register. */
1010 pci_restore_config_space_range(pdev, 4, 9, 10);
1011 pci_restore_config_space_range(pdev, 0, 3, 0);
1012 } else {
1013 pci_restore_config_space_range(pdev, 0, 15, 0);
1014 }
1015}
1016
1da177e4
LT
1017/**
1018 * pci_restore_state - Restore the saved state of a PCI device
1019 * @dev: PCI device that we're dealing with
1da177e4 1020 */
1d3c16a8 1021void pci_restore_state(struct pci_dev *dev)
1da177e4 1022{
c82f63e4 1023 if (!dev->state_saved)
1d3c16a8 1024 return;
4b77b0a2 1025
b56a5a23
MT
1026 /* PCI Express register must be restored first */
1027 pci_restore_pcie_state(dev);
1900ca13 1028 pci_restore_ats_state(dev);
b56a5a23 1029
a6cb9ee7 1030 pci_restore_config_space(dev);
ebfc5b80 1031
cc692a5f 1032 pci_restore_pcix_state(dev);
41017f0c 1033 pci_restore_msi_state(dev);
8c5cdb6a 1034 pci_restore_iov_state(dev);
8fed4b65 1035
4b77b0a2 1036 dev->state_saved = false;
1da177e4
LT
1037}
1038
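/*
 * Editor's example (not part of the original file): bracketing a
 * device-specific reset with pci_save_state()/pci_restore_state() so
 * that config space survives the reset.
 */
static void example_reset_with_state(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        /* ... perform the device-specific reset here ... */
        pci_restore_state(pdev);
}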
ffbdd3f7
AW
1039struct pci_saved_state {
1040 u32 config_space[16];
1041 struct pci_cap_saved_data cap[0];
1042};
1043
1044/**
1045 * pci_store_saved_state - Allocate and return an opaque struct containing
1046 * the device's saved state.
1047 * @dev: PCI device that we're dealing with
1048 *
1049 * Return NULL if there is no saved state or on error.
1050 */
1051struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1052{
1053 struct pci_saved_state *state;
1054 struct pci_cap_saved_state *tmp;
1055 struct pci_cap_saved_data *cap;
1056 struct hlist_node *pos;
1057 size_t size;
1058
1059 if (!dev->state_saved)
1060 return NULL;
1061
1062 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1063
1064 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1065 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1066
1067 state = kzalloc(size, GFP_KERNEL);
1068 if (!state)
1069 return NULL;
1070
1071 memcpy(state->config_space, dev->saved_config_space,
1072 sizeof(state->config_space));
1073
1074 cap = state->cap;
1075 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1076 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1077 memcpy(cap, &tmp->cap, len);
1078 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1079 }
1080 /* Empty cap_save terminates list */
1081
1082 return state;
1083}
1084EXPORT_SYMBOL_GPL(pci_store_saved_state);
1085
1086/**
1087 * pci_load_saved_state - Reload the provided saved state into struct pci_dev.
1088 * @dev: PCI device that we're dealing with
1089 * @state: Saved state returned from pci_store_saved_state()
1090 */
1091int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1092{
1093 struct pci_cap_saved_data *cap;
1094
1095 dev->state_saved = false;
1096
1097 if (!state)
1098 return 0;
1099
1100 memcpy(dev->saved_config_space, state->config_space,
1101 sizeof(state->config_space));
1102
1103 cap = state->cap;
1104 while (cap->size) {
1105 struct pci_cap_saved_state *tmp;
1106
1107 tmp = pci_find_saved_cap(dev, cap->cap_nr);
1108 if (!tmp || tmp->cap.size != cap->size)
1109 return -EINVAL;
1110
1111 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1112 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1113 sizeof(struct pci_cap_saved_data) + cap->size);
1114 }
1115
1116 dev->state_saved = true;
1117 return 0;
1118}
1119EXPORT_SYMBOL_GPL(pci_load_saved_state);
1120
1121/**
1122 * pci_load_and_free_saved_state - Reload the saved state pointed to by state,
1123 * and free the memory allocated for it.
1124 * @dev: PCI device that we're dealing with
1125 * @state: Pointer to saved state returned from pci_store_saved_state()
1126 */
1127int pci_load_and_free_saved_state(struct pci_dev *dev,
1128 struct pci_saved_state **state)
1129{
1130 int ret = pci_load_saved_state(dev, *state);
1131 kfree(*state);
1132 *state = NULL;
1133 return ret;
1134}
1135EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1136
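/*
 * Editor's example (not part of the original file): keeping an opaque
 * copy of the saved state so it can be reapplied after a later reset.
 * A real driver would keep the pointer in its private structure rather
 * than a static variable.
 */
static struct pci_saved_state *example_saved;

static void example_stash_state(struct pci_dev *pdev)
{
        pci_save_state(pdev);
        example_saved = pci_store_saved_state(pdev);
}

static void example_reapply_state(struct pci_dev *pdev)
{
        if (!pci_load_and_free_saved_state(pdev, &example_saved))
                pci_restore_state(pdev);
}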
38cc1302
HS
1137static int do_pci_enable_device(struct pci_dev *dev, int bars)
1138{
1139 int err;
1140
1141 err = pci_set_power_state(dev, PCI_D0);
1142 if (err < 0 && err != -EIO)
1143 return err;
1144 err = pcibios_enable_device(dev, bars);
1145 if (err < 0)
1146 return err;
1147 pci_fixup_device(pci_fixup_enable, dev);
1148
1149 return 0;
1150}
1151
1152/**
0b62e13b 1153 * pci_reenable_device - Resume abandoned device
38cc1302
HS
1154 * @dev: PCI device to be resumed
1155 *
1156 * Note this function is a backend of pci_default_resume and is not supposed
1157 * to be called by normal code; write a proper resume handler and use that instead.
1158 */
0b62e13b 1159int pci_reenable_device(struct pci_dev *dev)
38cc1302 1160{
296ccb08 1161 if (pci_is_enabled(dev))
38cc1302
HS
1162 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1163 return 0;
1164}
1165
b718989d
BH
1166static int __pci_enable_device_flags(struct pci_dev *dev,
1167 resource_size_t flags)
1da177e4
LT
1168{
1169 int err;
b718989d 1170 int i, bars = 0;
1da177e4 1171
97c145f7
JB
1172 /*
1173 * Power state could be unknown at this point, either due to a fresh
1174 * boot or a device removal call. So get the current power state
1175 * so that things like MSI message writing will behave as expected
1176 * (e.g. if the device really is in D0 at enable time).
1177 */
1178 if (dev->pm_cap) {
1179 u16 pmcsr;
1180 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1181 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1182 }
1183
9fb625c3
HS
1184 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1185 return 0; /* already enabled */
1186
497f16f2
YL
1187 /* only skip sriov related */
1188 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1189 if (dev->resource[i].flags & flags)
1190 bars |= (1 << i);
1191 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
b718989d
BH
1192 if (dev->resource[i].flags & flags)
1193 bars |= (1 << i);
1194
38cc1302 1195 err = do_pci_enable_device(dev, bars);
95a62965 1196 if (err < 0)
38cc1302 1197 atomic_dec(&dev->enable_cnt);
9fb625c3 1198 return err;
1da177e4
LT
1199}
1200
b718989d
BH
1201/**
1202 * pci_enable_device_io - Initialize a device for use with IO space
1203 * @dev: PCI device to be initialized
1204 *
1205 * Initialize device before it's used by a driver. Ask low-level code
1206 * to enable I/O resources. Wake up the device if it was suspended.
1207 * Beware, this function can fail.
1208 */
1209int pci_enable_device_io(struct pci_dev *dev)
1210{
1211 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1212}
1213
1214/**
1215 * pci_enable_device_mem - Initialize a device for use with Memory space
1216 * @dev: PCI device to be initialized
1217 *
1218 * Initialize device before it's used by a driver. Ask low-level code
1219 * to enable Memory resources. Wake up the device if it was suspended.
1220 * Beware, this function can fail.
1221 */
1222int pci_enable_device_mem(struct pci_dev *dev)
1223{
1224 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1225}
1226
bae94d02
IPG
1227/**
1228 * pci_enable_device - Initialize device before it's used by a driver.
1229 * @dev: PCI device to be initialized
1230 *
1231 * Initialize device before it's used by a driver. Ask low-level code
1232 * to enable I/O and memory. Wake up the device if it was suspended.
1233 * Beware, this function can fail.
1234 *
1235 * Note we don't actually enable the device many times if we call
1236 * this function repeatedly (we just increment the count).
1237 */
1238int pci_enable_device(struct pci_dev *dev)
1239{
b718989d 1240 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
bae94d02
IPG
1241}
1242
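/*
 * Editor's example (not part of the original file): the canonical enable
 * sequence in a driver probe routine; the matching teardown appears
 * after pci_disable_device() below.  The resource name "example" is
 * illustrative.
 */
static int example_probe(struct pci_dev *pdev)
{
        int err = pci_enable_device(pdev);

        if (err)
                return err;
        err = pci_request_regions(pdev, "example");
        if (err) {
                pci_disable_device(pdev);
                return err;
        }
        pci_set_master(pdev);
        return 0;
}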
9ac7849e
TH
1243/*
1244 * Managed PCI resources. This manages device on/off, intx/msi/msix
1245 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1246 * there's no need to track it separately. pci_devres is initialized
1247 * when a device is enabled using managed PCI device enable interface.
1248 */
1249struct pci_devres {
7f375f32
TH
1250 unsigned int enabled:1;
1251 unsigned int pinned:1;
9ac7849e
TH
1252 unsigned int orig_intx:1;
1253 unsigned int restore_intx:1;
1254 u32 region_mask;
1255};
1256
1257static void pcim_release(struct device *gendev, void *res)
1258{
1259 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1260 struct pci_devres *this = res;
1261 int i;
1262
1263 if (dev->msi_enabled)
1264 pci_disable_msi(dev);
1265 if (dev->msix_enabled)
1266 pci_disable_msix(dev);
1267
1268 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1269 if (this->region_mask & (1 << i))
1270 pci_release_region(dev, i);
1271
1272 if (this->restore_intx)
1273 pci_intx(dev, this->orig_intx);
1274
7f375f32 1275 if (this->enabled && !this->pinned)
9ac7849e
TH
1276 pci_disable_device(dev);
1277}
1278
1279static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1280{
1281 struct pci_devres *dr, *new_dr;
1282
1283 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1284 if (dr)
1285 return dr;
1286
1287 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1288 if (!new_dr)
1289 return NULL;
1290 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1291}
1292
1293static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1294{
1295 if (pci_is_managed(pdev))
1296 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1297 return NULL;
1298}
1299
1300/**
1301 * pcim_enable_device - Managed pci_enable_device()
1302 * @pdev: PCI device to be initialized
1303 *
1304 * Managed pci_enable_device().
1305 */
1306int pcim_enable_device(struct pci_dev *pdev)
1307{
1308 struct pci_devres *dr;
1309 int rc;
1310
1311 dr = get_pci_dr(pdev);
1312 if (unlikely(!dr))
1313 return -ENOMEM;
b95d58ea
TH
1314 if (dr->enabled)
1315 return 0;
9ac7849e
TH
1316
1317 rc = pci_enable_device(pdev);
1318 if (!rc) {
1319 pdev->is_managed = 1;
7f375f32 1320 dr->enabled = 1;
9ac7849e
TH
1321 }
1322 return rc;
1323}
1324
1325/**
1326 * pcim_pin_device - Pin managed PCI device
1327 * @pdev: PCI device to pin
1328 *
1329 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1330 * driver detach. @pdev must have been enabled with
1331 * pcim_enable_device().
1332 */
1333void pcim_pin_device(struct pci_dev *pdev)
1334{
1335 struct pci_devres *dr;
1336
1337 dr = find_pci_dr(pdev);
7f375f32 1338 WARN_ON(!dr || !dr->enabled);
9ac7849e 1339 if (dr)
7f375f32 1340 dr->pinned = 1;
9ac7849e
TH
1341}
1342
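/*
 * Editor's example (not part of the original file): the managed
 * equivalent of the probe sequence above.  Everything acquired through
 * pcim_ interfaces is released automatically on driver detach, so no
 * explicit unwind path is needed.
 */
static int example_managed_probe(struct pci_dev *pdev)
{
        int err = pcim_enable_device(pdev);

        if (err)
                return err;
        return pcim_iomap_regions(pdev, 1 << 0, "example");
}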
1da177e4
LT
1343/**
1344 * pcibios_disable_device - disable arch specific PCI resources for device dev
1345 * @dev: the PCI device to disable
1346 *
1347 * Disables architecture specific PCI resources for the device. This
1348 * is the default implementation. Architecture implementations can
1349 * override this.
1350 */
1351void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1352
fa58d305
RW
1353static void do_pci_disable_device(struct pci_dev *dev)
1354{
1355 u16 pci_command;
1356
1357 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1358 if (pci_command & PCI_COMMAND_MASTER) {
1359 pci_command &= ~PCI_COMMAND_MASTER;
1360 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1361 }
1362
1363 pcibios_disable_device(dev);
1364}
1365
1366/**
1367 * pci_disable_enabled_device - Disable device without updating enable_cnt
1368 * @dev: PCI device to disable
1369 *
1370 * NOTE: This function is a backend of PCI power management routines and is
1371 * not supposed to be called by drivers.
1372 */
1373void pci_disable_enabled_device(struct pci_dev *dev)
1374{
296ccb08 1375 if (pci_is_enabled(dev))
fa58d305
RW
1376 do_pci_disable_device(dev);
1377}
1378
1da177e4
LT
1379/**
1380 * pci_disable_device - Disable PCI device after use
1381 * @dev: PCI device to be disabled
1382 *
1383 * Signal to the system that the PCI device is not in use by the system
1384 * anymore. This only involves disabling PCI bus-mastering, if active.
bae94d02
IPG
1385 *
1386 * Note we don't actually disable the device until all callers of
ee6583f6 1387 * pci_enable_device() have called pci_disable_device().
1da177e4
LT
1388 */
1389void
1390pci_disable_device(struct pci_dev *dev)
1391{
9ac7849e 1392 struct pci_devres *dr;
99dc804d 1393
9ac7849e
TH
1394 dr = find_pci_dr(dev);
1395 if (dr)
7f375f32 1396 dr->enabled = 0;
9ac7849e 1397
bae94d02
IPG
1398 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1399 return;
1400
fa58d305 1401 do_pci_disable_device(dev);
1da177e4 1402
fa58d305 1403 dev->is_busmaster = 0;
1da177e4
LT
1404}
1405
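/*
 * Editor's example (not part of the original file): the remove-time
 * mirror of the enable sequence shown after pci_enable_device() above.
 */
static void example_remove(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}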
f7bdd12d
BK
1406/**
1407 * pcibios_set_pcie_reset_state - set reset state for device dev
45e829ea 1408 * @dev: the PCIe device to reset
f7bdd12d
BK
1409 * @state: Reset state to enter into
1410 *
45e829ea 1412 * Sets the PCIe reset state for the device. This is the default
f7bdd12d
BK
1413 * implementation. Architecture implementations can override this.
1414 */
1415int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1416 enum pcie_reset_state state)
1417{
1418 return -EINVAL;
1419}
1420
1421/**
1422 * pci_set_pcie_reset_state - set reset state for device dev
45e829ea 1423 * @dev: the PCIe device to reset
f7bdd12d
BK
1424 * @state: Reset state to enter into
1425 *
1426 * Sets the PCIe reset state for the device.
1428 */
1429int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1430{
1431 return pcibios_set_pcie_reset_state(dev, state);
1432}
1433
58ff4633
RW
1434/**
1435 * pci_check_pme_status - Check if given device has generated PME.
1436 * @dev: Device to check.
1437 *
1438 * Check the PME status of the device and if set, clear it and clear PME enable
1439 * (if set). Return 'true' if PME status and PME enable were both set or
1440 * 'false' otherwise.
1441 */
1442bool pci_check_pme_status(struct pci_dev *dev)
1443{
1444 int pmcsr_pos;
1445 u16 pmcsr;
1446 bool ret = false;
1447
1448 if (!dev->pm_cap)
1449 return false;
1450
1451 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1452 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1453 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1454 return false;
1455
1456 /* Clear PME status. */
1457 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1458 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1459 /* Disable PME to avoid interrupt flood. */
1460 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1461 ret = true;
1462 }
1463
1464 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1465
1466 return ret;
1467}
1468
b67ea761
RW
1469/**
1470 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1471 * @dev: Device to handle.
379021d5 1472 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1473 *
1474 * Check if @dev has generated PME and queue a resume request for it in that
1475 * case.
1476 */
379021d5 1477static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1478{
379021d5
RW
1479 if (pme_poll_reset && dev->pme_poll)
1480 dev->pme_poll = false;
1481
c125e96f 1482 if (pci_check_pme_status(dev)) {
c125e96f 1483 pci_wakeup_event(dev);
0f953bf6 1484 pm_request_resume(&dev->dev);
c125e96f 1485 }
b67ea761
RW
1486 return 0;
1487}
1488
1489/**
1490 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1491 * @bus: Top bus of the subtree to walk.
1492 */
1493void pci_pme_wakeup_bus(struct pci_bus *bus)
1494{
1495 if (bus)
379021d5 1496 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1497}
1498
eb9d0fe4
RW
1499/**
1500 * pci_pme_capable - check the capability of PCI device to generate PME#
1501 * @dev: PCI device to handle.
eb9d0fe4
RW
1502 * @state: PCI state from which device will issue PME#.
1503 */
e5899e1b 1504bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1505{
337001b6 1506 if (!dev->pm_cap)
eb9d0fe4
RW
1507 return false;
1508
337001b6 1509 return !!(dev->pme_support & (1 << state));
eb9d0fe4
RW
1510}
1511
df17e62e
MG
1512static void pci_pme_list_scan(struct work_struct *work)
1513{
379021d5 1514 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1515
1516 mutex_lock(&pci_pme_list_mutex);
1517 if (!list_empty(&pci_pme_list)) {
379021d5
RW
1518 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1519 if (pme_dev->dev->pme_poll) {
1520 pci_pme_wakeup(pme_dev->dev, NULL);
1521 } else {
1522 list_del(&pme_dev->list);
1523 kfree(pme_dev);
1524 }
1525 }
1526 if (!list_empty(&pci_pme_list))
1527 schedule_delayed_work(&pci_pme_work,
1528 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1529 }
1530 mutex_unlock(&pci_pme_list_mutex);
1531}
1532
eb9d0fe4
RW
1533/**
1534 * pci_pme_active - enable or disable PCI device's PME# function
1535 * @dev: PCI device to handle.
eb9d0fe4
RW
1536 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1537 *
1538 * The caller must verify that the device is capable of generating PME# before
1539 * calling this function with @enable equal to 'true'.
1540 */
5a6c9b60 1541void pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1542{
1543 u16 pmcsr;
1544
337001b6 1545 if (!dev->pm_cap)
eb9d0fe4
RW
1546 return;
1547
337001b6 1548 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1549 /* Clear PME_Status by writing 1 to it and enable PME# */
1550 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1551 if (!enable)
1552 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1553
337001b6 1554 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
eb9d0fe4 1555
df17e62e
MG
1556 /* PCI (as opposed to PCIe) PME requires that the device have
1557 its PME# line hooked up correctly. Not all hardware vendors
1558 do this, so the PME never gets delivered and the device
1559 remains asleep. The easiest way around this is to
1560 periodically walk the list of suspended devices and check
1561 whether any have their PME flag set. The assumption is that
1562 we'll wake up often enough anyway that this won't be a huge
1563 hit, and the power savings from the devices will still be a
1564 win. */
1565
379021d5 1566 if (dev->pme_poll) {
df17e62e
MG
1567 struct pci_pme_device *pme_dev;
1568 if (enable) {
1569 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1570 GFP_KERNEL);
1571 if (!pme_dev)
1572 goto out;
1573 pme_dev->dev = dev;
1574 mutex_lock(&pci_pme_list_mutex);
1575 list_add(&pme_dev->list, &pci_pme_list);
1576 if (list_is_singular(&pci_pme_list))
1577 schedule_delayed_work(&pci_pme_work,
1578 msecs_to_jiffies(PME_TIMEOUT));
1579 mutex_unlock(&pci_pme_list_mutex);
1580 } else {
1581 mutex_lock(&pci_pme_list_mutex);
1582 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1583 if (pme_dev->dev == dev) {
1584 list_del(&pme_dev->list);
1585 kfree(pme_dev);
1586 break;
1587 }
1588 }
1589 mutex_unlock(&pci_pme_list_mutex);
1590 }
1591 }
1592
1593out:
85b8582d 1594 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4
RW
1595}
1596
1da177e4 1597/**
6cbf8214 1598 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1599 * @dev: PCI device affected
1600 * @state: PCI state from which device will issue wakeup events
6cbf8214 1601 * @runtime: True if the events are to be generated at run time
075c1771
DB
1602 * @enable: True to enable event generation; false to disable
1603 *
1604 * This enables the device as a wakeup event source, or disables it.
1605 * When such events involves platform-specific hooks, those hooks are
1606 * called automatically by this routine.
1607 *
1608 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1609 * always require such platform hooks.
075c1771 1610 *
eb9d0fe4
RW
1611 * RETURN VALUE:
1612 * 0 is returned on success
1613 * -EINVAL is returned if device is not supposed to wake up the system
1614 * Error code depending on the platform is returned if both the platform and
1615 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1616 */
6cbf8214
RW
1617int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1618 bool runtime, bool enable)
1da177e4 1619{
5bcc2fb4 1620 int ret = 0;
075c1771 1621
6cbf8214 1622 if (enable && !runtime && !device_may_wakeup(&dev->dev))
eb9d0fe4 1623 return -EINVAL;
1da177e4 1624
e80bb09d
RW
1625 /* Don't do the same thing twice in a row for one device. */
1626 if (!!enable == !!dev->wakeup_prepared)
1627 return 0;
1628
eb9d0fe4
RW
1629 /*
1630 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1631 * Anderson we should be doing PME# wake enable followed by ACPI wake
1632 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1633 */
1da177e4 1634
5bcc2fb4
RW
1635 if (enable) {
1636 int error;
1da177e4 1637
5bcc2fb4
RW
1638 if (pci_pme_capable(dev, state))
1639 pci_pme_active(dev, true);
1640 else
1641 ret = 1;
6cbf8214
RW
1642 error = runtime ? platform_pci_run_wake(dev, true) :
1643 platform_pci_sleep_wake(dev, true);
5bcc2fb4
RW
1644 if (ret)
1645 ret = error;
e80bb09d
RW
1646 if (!ret)
1647 dev->wakeup_prepared = true;
5bcc2fb4 1648 } else {
6cbf8214
RW
1649 if (runtime)
1650 platform_pci_run_wake(dev, false);
1651 else
1652 platform_pci_sleep_wake(dev, false);
5bcc2fb4 1653 pci_pme_active(dev, false);
e80bb09d 1654 dev->wakeup_prepared = false;
5bcc2fb4 1655 }
1da177e4 1656
5bcc2fb4 1657 return ret;
eb9d0fe4 1658}
6cbf8214 1659EXPORT_SYMBOL(__pci_enable_wake);
1da177e4 1660
0235c4fc
RW
1661/**
1662 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1663 * @dev: PCI device to prepare
1664 * @enable: True to enable wake-up event generation; false to disable
1665 *
1666 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1667 * and this function allows them to set that up cleanly - pci_enable_wake()
1668 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1669 * ordering constraints.
1670 *
1671 * This function only returns error code if the device is not capable of
1672 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1673 * enable wake-up power for it.
1674 */
1675int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1676{
1677 return pci_pme_capable(dev, PCI_D3cold) ?
1678 pci_enable_wake(dev, PCI_D3cold, enable) :
1679 pci_enable_wake(dev, PCI_D3hot, enable);
1680}
1681
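/*
 * Editor's example (not part of the original file): a Wake-on-LAN style
 * suspend path, arming wake-up with pci_wake_from_d3() before entering
 * D3hot.  "wol" stands for a driver-specific flag.
 */
static int example_wol_suspend(struct pci_dev *pdev, bool wol)
{
        pci_save_state(pdev);
        pci_wake_from_d3(pdev, wol);
        return pci_set_power_state(pdev, PCI_D3hot);
}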
404cc2d8 1682/**
37139074
JB
1683 * pci_target_state - find an appropriate low power state for a given PCI dev
1684 * @dev: PCI device
1685 *
1686 * Use underlying platform code to find a supported low power state for @dev.
1687 * If the platform can't manage @dev, return the deepest state from which it
1688 * can generate wake events, based on any available PME info.
404cc2d8 1689 */
e5899e1b 1690pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1691{
1692 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1693
1694 if (platform_pci_power_manageable(dev)) {
1695 /*
1696 * Call the platform to choose the target state of the device
1697 * and enable wake-up from this state if supported.
1698 */
1699 pci_power_t state = platform_pci_choose_state(dev);
1700
1701 switch (state) {
1702 case PCI_POWER_ERROR:
1703 case PCI_UNKNOWN:
1704 break;
1705 case PCI_D1:
1706 case PCI_D2:
1707 if (pci_no_d1d2(dev))
1708 break;
1709 default:
1710 target_state = state;
404cc2d8 1711 }
d2abdf62
RW
1712 } else if (!dev->pm_cap) {
1713 target_state = PCI_D0;
404cc2d8
RW
1714 } else if (device_may_wakeup(&dev->dev)) {
1715 /*
1716 * Find the deepest state from which the device can generate
1717 * wake-up events, make it the target state and enable device
1718 * to generate PME#.
1719 */
337001b6
RW
1720 if (dev->pme_support) {
1721 while (target_state
1722 && !(dev->pme_support & (1 << target_state)))
1723 target_state--;
404cc2d8
RW
1724 }
1725 }
1726
e5899e1b
RW
1727 return target_state;
1728}
1729
1730/**
1731 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1732 * @dev: Device to handle.
1733 *
1734 * Choose the power state appropriate for the device depending on whether
1735 * it can wake up the system and/or is power manageable by the platform
1736 * (PCI_D3hot is the default) and put the device into that state.
1737 */
1738int pci_prepare_to_sleep(struct pci_dev *dev)
1739{
1740 pci_power_t target_state = pci_target_state(dev);
1741 int error;
1742
1743 if (target_state == PCI_POWER_ERROR)
1744 return -EIO;
1745
8efb8c76 1746 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1747
404cc2d8
RW
1748 error = pci_set_power_state(dev, target_state);
1749
1750 if (error)
1751 pci_enable_wake(dev, target_state, false);
1752
1753 return error;
1754}
1755
1756/**
443bd1c4 1757 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1758 * @dev: Device to handle.
1759 *
88393161 1760 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1761 */
1762int pci_back_from_sleep(struct pci_dev *dev)
1763{
1764 pci_enable_wake(dev, PCI_D0, false);
1765 return pci_set_power_state(dev, PCI_D0);
1766}
1767
6cbf8214
RW
1768/**
1769 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1770 * @dev: PCI device being suspended.
1771 *
1772 * Prepare @dev to generate wake-up events at run time and put it into a low
1773 * power state.
1774 */
1775int pci_finish_runtime_suspend(struct pci_dev *dev)
1776{
1777 pci_power_t target_state = pci_target_state(dev);
1778 int error;
1779
1780 if (target_state == PCI_POWER_ERROR)
1781 return -EIO;
1782
1783 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1784
1785 error = pci_set_power_state(dev, target_state);
1786
1787 if (error)
1788 __pci_enable_wake(dev, target_state, true, false);
1789
1790 return error;
1791}
1792
b67ea761
RW
1793/**
1794 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1795 * @dev: Device to check.
1796 *
1797 * Return true if the device itself is capable of generating wake-up events
1798 * (through the platform or using the native PCIe PME) or if the device supports
1799 * PME and one of its upstream bridges can generate wake-up events.
1800 */
1801bool pci_dev_run_wake(struct pci_dev *dev)
1802{
1803 struct pci_bus *bus = dev->bus;
1804
1805 if (device_run_wake(&dev->dev))
1806 return true;
1807
1808 if (!dev->pme_support)
1809 return false;
1810
1811 while (bus->parent) {
1812 struct pci_dev *bridge = bus->self;
1813
1814 if (device_run_wake(&bridge->dev))
1815 return true;
1816
1817 bus = bus->parent;
1818 }
1819
1820 /* We have reached the root bus. */
1821 if (bus->bridge)
1822 return device_run_wake(bus->bridge);
1823
1824 return false;
1825}
1826EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1827
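/*
 * Example (not part of the original file): a sketch of the idiom some
 * drivers (e.g. e1000e) use at the end of probe -- only drop the runtime
 * PM usage count, and thus allow runtime suspend, when something can wake
 * the device back up.  foo_probe() is hypothetical.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;
}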
eb9d0fe4
RW
1828/**
1829 * pci_pm_init - Initialize PM functions of given PCI device
1830 * @dev: PCI device to handle.
1831 */
1832void pci_pm_init(struct pci_dev *dev)
1833{
1834 int pm;
1835 u16 pmc;
1da177e4 1836
bb910a70 1837 pm_runtime_forbid(&dev->dev);
a1e4d72c 1838 device_enable_async_suspend(&dev->dev);
e80bb09d 1839 dev->wakeup_prepared = false;
bb910a70 1840
337001b6
RW
1841 dev->pm_cap = 0;
1842
eb9d0fe4
RW
1843 /* find PCI PM capability in list */
1844 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1845 if (!pm)
50246dd4 1846 return;
eb9d0fe4
RW
1847 /* Check device's ability to generate PME# */
1848 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1849
eb9d0fe4
RW
1850 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1851 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1852 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1853 return;
eb9d0fe4
RW
1854 }
1855
337001b6 1856 dev->pm_cap = pm;
1ae861e6 1857 dev->d3_delay = PCI_PM_D3_WAIT;
337001b6
RW
1858
1859 dev->d1_support = false;
1860 dev->d2_support = false;
1861 if (!pci_no_d1d2(dev)) {
c9ed77ee 1862 if (pmc & PCI_PM_CAP_D1)
337001b6 1863 dev->d1_support = true;
c9ed77ee 1864 if (pmc & PCI_PM_CAP_D2)
337001b6 1865 dev->d2_support = true;
c9ed77ee
BH
1866
1867 if (dev->d1_support || dev->d2_support)
1868 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1869 dev->d1_support ? " D1" : "",
1870 dev->d2_support ? " D2" : "");
337001b6
RW
1871 }
1872
1873 pmc &= PCI_PM_CAP_PME_MASK;
1874 if (pmc) {
10c3d71d
BH
1875 dev_printk(KERN_DEBUG, &dev->dev,
1876 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1877 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1878 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1879 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1880 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1881 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1882 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1883 dev->pme_poll = true;
eb9d0fe4
RW
1884 /*
1885 * Make device's PM flags reflect the wake-up capability, but
1886 * let user space enable it to wake up the system as needed.
1887 */
1888 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1889 /* Disable the PME# generation functionality */
337001b6
RW
1890 pci_pme_active(dev, false);
1891 } else {
1892 dev->pme_support = 0;
eb9d0fe4 1893 }
1da177e4
LT
1894}
1895
eb9c39d0
JB
1896/**
1897 * platform_pci_wakeup_init - init platform wakeup if present
1898 * @dev: PCI device
1899 *
1900 * Some devices don't have PCI PM caps but can still generate wakeup
1901 * events through platform methods (like ACPI events). If @dev supports
1902 * platform wakeup events, set the device flag to indicate as much. This
1903 * may be redundant if the device also supports PCI PM caps, but double
1904 * initialization should be safe in that case.
1905 */
1906void platform_pci_wakeup_init(struct pci_dev *dev)
1907{
1908 if (!platform_pci_can_wakeup(dev))
1909 return;
1910
1911 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
1912 platform_pci_sleep_wake(dev, false);
1913}
1914
34a4876e
YL
1915static void pci_add_saved_cap(struct pci_dev *pci_dev,
1916 struct pci_cap_saved_state *new_cap)
1917{
1918 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1919}
1920
63f4898a
RW
1921/**
1922 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1923 * @dev: the PCI device
1924 * @cap: the capability to allocate the buffer for
1925 * @size: requested size of the buffer
1926 */
1927static int pci_add_cap_save_buffer(
1928 struct pci_dev *dev, char cap, unsigned int size)
1929{
1930 int pos;
1931 struct pci_cap_saved_state *save_state;
1932
1933 pos = pci_find_capability(dev, cap);
1934 if (pos <= 0)
1935 return 0;
1936
1937 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1938 if (!save_state)
1939 return -ENOMEM;
1940
24a4742f
AW
1941 save_state->cap.cap_nr = cap;
1942 save_state->cap.size = size;
63f4898a
RW
1943 pci_add_saved_cap(dev, save_state);
1944
1945 return 0;
1946}
1947
1948/**
1949 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1950 * @dev: the PCI device
1951 */
1952void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1953{
1954 int error;
1955
89858517
YZ
1956 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1957 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
1958 if (error)
1959 dev_err(&dev->dev,
1960 "unable to preallocate PCI Express save buffer\n");
1961
1962 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1963 if (error)
1964 dev_err(&dev->dev,
1965 "unable to preallocate PCI-X save buffer\n");
1966}
1967
f796841e
YL
1968void pci_free_cap_save_buffers(struct pci_dev *dev)
1969{
1970 struct pci_cap_saved_state *tmp;
1971 struct hlist_node *pos, *n;
1972
1973 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1974 kfree(tmp);
1975}
1976
58c3a727
YZ
1977/**
1978 * pci_enable_ari - enable ARI forwarding if hardware supports it
1979 * @dev: the PCI device
1980 */
1981void pci_enable_ari(struct pci_dev *dev)
1982{
1983 int pos;
1984 u32 cap;
864d296c 1985 u16 flags, ctrl;
8113587c 1986 struct pci_dev *bridge;
58c3a727 1987
6748dcc2 1988 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
1989 return;
1990
8113587c
ZY
1991 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1992 if (!pos)
58c3a727
YZ
1993 return;
1994
8113587c 1995 bridge = dev->bus->self;
5f4d91a1 1996 if (!bridge || !pci_is_pcie(bridge))
8113587c
ZY
1997 return;
1998
06a1cbaf 1999 pos = pci_pcie_cap(bridge);
58c3a727
YZ
2000 if (!pos)
2001 return;
2002
864d296c
CW
2003 /* ARI is a PCIe v2 feature */
2004 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2005 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2006 return;
2007
8113587c 2008 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2009 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2010 return;
2011
8113587c 2012 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 2013 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 2014 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 2015
8113587c 2016 bridge->ari_enabled = 1;
58c3a727
YZ
2017}
2018
b48d4425
JB
2019/**
2020 * pci_enable_ido - enable ID-based ordering on a device
2021 * @dev: the PCI device
2022 * @type: which types of IDO to enable
2023 *
2024 * Enable ID-based ordering on @dev. @type can contain the bits
2025 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2026 * which types of transactions are allowed to be re-ordered.
2027 */
2028void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2029{
2030 int pos;
2031 u16 ctrl;
2032
2033 pos = pci_pcie_cap(dev);
2034 if (!pos)
2035 return;
2036
2037 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2038 if (type & PCI_EXP_IDO_REQUEST)
2039 ctrl |= PCI_EXP_IDO_REQ_EN;
2040 if (type & PCI_EXP_IDO_COMPLETION)
2041 ctrl |= PCI_EXP_IDO_CMP_EN;
2042 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2043}
2044EXPORT_SYMBOL(pci_enable_ido);
2045
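/*
 * Example (not part of the original file): opting in to ID-based ordering
 * for both directions from a hypothetical probe path.  There is no return
 * value to check -- pci_enable_ido() is silently a no-op on devices
 * without a PCIe capability.
 */
pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);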
2046/**
2047 * pci_disable_ido - disable ID-based ordering on a device
2048 * @dev: the PCI device
2049 * @type: which types of IDO to disable
2050 */
2051void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2052{
2053 int pos;
2054 u16 ctrl;
2055
2056 if (!pci_is_pcie(dev))
2057 return;
2058
2059 pos = pci_pcie_cap(dev);
2060 if (!pos)
2061 return;
2062
2063 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2064 if (type & PCI_EXP_IDO_REQUEST)
2065 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2066 if (type & PCI_EXP_IDO_COMPLETION)
2067 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2068 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2069}
2070EXPORT_SYMBOL(pci_disable_ido);
2071
48a92a81
JB
2072/**
2073 * pci_enable_obff - enable optimized buffer flush/fill
2074 * @dev: PCI device
2075 * @type: type of signaling to use
2076 *
2077 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2078 * signaling if possible, falling back to message signaling only if
2079 * WAKE# isn't supported. @type should indicate whether the PCIe link
2080 * should be brought out of L0s or L1 to send the message. It should be
2081 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2082 *
2083 * If your device can benefit from receiving all messages, even at the
2084 * power cost of bringing the link back up from a low power state, use
2085 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0
2086 * (the preferred type).
2087 *
2088 * RETURNS:
2089 * Zero on success, appropriate error number on failure.
2090 */
2091int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2092{
2093 int pos;
2094 u32 cap;
2095 u16 ctrl;
2096 int ret;
2097
2098 if (!pci_is_pcie(dev))
2099 return -ENOTSUPP;
2100
2101 pos = pci_pcie_cap(dev);
2102 if (!pos)
2103 return -ENOTSUPP;
2104
2105 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2106 if (!(cap & PCI_EXP_OBFF_MASK))
2107 return -ENOTSUPP; /* no OBFF support at all */
2108
2109 /* Make sure the topology supports OBFF as well */
2110 if (dev->bus) {
2111 ret = pci_enable_obff(dev->bus->self, type);
2112 if (ret)
2113 return ret;
2114 }
2115
2116 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2117 if (cap & PCI_EXP_OBFF_WAKE)
2118 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2119 else {
2120 switch (type) {
2121 case PCI_EXP_OBFF_SIGNAL_L0:
2122 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2123 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2124 break;
2125 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2126 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2127 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2128 break;
2129 default:
2130 WARN(1, "bad OBFF signal type\n");
2131 return -ENOTSUPP;
2132 }
2133 }
2134 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2135
2136 return 0;
2137}
2138EXPORT_SYMBOL(pci_enable_obff);
2139
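/*
 * Example (not part of the original file): a sketch of requesting OBFF with
 * the preferred signal type and shrugging off the (common) absence of the
 * feature.  The message text is illustrative only.
 */
if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
	dev_info(&pdev->dev, "OBFF unavailable, continuing without it\n");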
2140/**
2141 * pci_disable_obff - disable optimized buffer flush/fill
2142 * @dev: PCI device
2143 *
2144 * Disable OBFF on @dev.
2145 */
2146void pci_disable_obff(struct pci_dev *dev)
2147{
2148 int pos;
2149 u16 ctrl;
2150
2151 if (!pci_is_pcie(dev))
2152 return;
2153
2154 pos = pci_pcie_cap(dev);
2155 if (!pos)
2156 return;
2157
2158 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2159 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2160 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2161}
2162EXPORT_SYMBOL(pci_disable_obff);
2163
51c2e0a7
JB
2164/**
2165 * pci_ltr_supported - check whether a device supports LTR
2166 * @dev: PCI device
2167 *
2168 * RETURNS:
2169 * True if @dev supports latency tolerance reporting, false otherwise.
2170 */
2171bool pci_ltr_supported(struct pci_dev *dev)
2172{
2173 int pos;
2174 u32 cap;
2175
2176 if (!pci_is_pcie(dev))
2177 return false;
2178
2179 pos = pci_pcie_cap(dev);
2180 if (!pos)
2181 return false;
2182
2183 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2184
2185 return cap & PCI_EXP_DEVCAP2_LTR;
2186}
2187EXPORT_SYMBOL(pci_ltr_supported);
2188
2189/**
2190 * pci_enable_ltr - enable latency tolerance reporting
2191 * @dev: PCI device
2192 *
2193 * Enable LTR on @dev if possible, which means enabling it first on
2194 * upstream ports.
2195 *
2196 * RETURNS:
2197 * Zero on success, errno on failure.
2198 */
2199int pci_enable_ltr(struct pci_dev *dev)
2200{
2201 int pos;
2202 u16 ctrl;
2203 int ret;
2204
2205 if (!pci_ltr_supported(dev))
2206 return -ENOTSUPP;
2207
2208 pos = pci_pcie_cap(dev);
2209 if (!pos)
2210 return -ENOTSUPP;
2211
2212 /* Only primary function can enable/disable LTR */
2213 if (PCI_FUNC(dev->devfn) != 0)
2214 return -EINVAL;
2215
2216 /* Enable upstream ports first */
2217 if (dev->bus) {
2218 ret = pci_enable_ltr(dev->bus->self);
2219 if (ret)
2220 return ret;
2221 }
2222
2223 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2224 ctrl |= PCI_EXP_LTR_EN;
2225 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2226
2227 return 0;
2228}
2229EXPORT_SYMBOL(pci_enable_ltr);
2230
2231/**
2232 * pci_disable_ltr - disable latency tolerance reporting
2233 * @dev: PCI device
2234 */
2235void pci_disable_ltr(struct pci_dev *dev)
2236{
2237 int pos;
2238 u16 ctrl;
2239
2240 if (!pci_ltr_supported(dev))
2241 return;
2242
2243 pos = pci_pcie_cap(dev);
2244 if (!pos)
2245 return;
2246
2247 /* Only primary function can enable/disable LTR */
2248 if (PCI_FUNC(dev->devfn) != 0)
2249 return;
2250
2251 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2252 ctrl &= ~PCI_EXP_LTR_EN;
2253 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2254}
2255EXPORT_SYMBOL(pci_disable_ltr);
2256
2257static int __pci_ltr_scale(int *val)
2258{
2259 int scale = 0;
2260
2261 while (*val > 1023) {
2262 *val = (*val + 31) / 32;
2263 scale++;
2264 }
2265 return scale;
2266}
2267
2268/**
2269 * pci_set_ltr - set LTR latency values
2270 * @dev: PCI device
2271 * @snoop_lat_ns: snoop latency in nanoseconds
2272 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2273 *
2274 * Figure out the scale and set the LTR values accordingly.
2275 */
2276int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2277{
2278 int pos, ret, snoop_scale, nosnoop_scale;
2279 u16 val;
2280
2281 if (!pci_ltr_supported(dev))
2282 return -ENOTSUPP;
2283
2284 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2285 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2286
2287 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2288 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2289 return -EINVAL;
2290
2291 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2292 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2293 return -EINVAL;
2294
2295 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2296 if (!pos)
2297 return -ENOTSUPP;
2298
2299 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2300 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2301 if (ret != 4)
2302 return -EIO;
2303
2304 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2305 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2306 if (ret != 4)
2307 return -EIO;
2308
2309 return 0;
2310}
2311EXPORT_SYMBOL(pci_set_ltr);
2312
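/*
 * Example (not part of the original file): a sketch of LTR setup from a
 * hypothetical probe path.  The 3 ms snoop / 30 ms no-snoop tolerances are
 * made-up illustrative numbers; real values are device- and
 * platform-specific.
 */
if (!pci_enable_ltr(pdev))
	pci_set_ltr(pdev, 3000000, 30000000);	/* values in nanoseconds */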
5d990b62
CW
2313static int pci_acs_enable;
2314
2315/**
2316 * pci_request_acs - ask for ACS to be enabled if supported
2317 */
2318void pci_request_acs(void)
2319{
2320 pci_acs_enable = 1;
2321}
2322
ae21ee65
AK
2323/**
2324 * pci_enable_acs - enable ACS if hardware supports it
2325 * @dev: the PCI device
2326 */
2327void pci_enable_acs(struct pci_dev *dev)
2328{
2329 int pos;
2330 u16 cap;
2331 u16 ctrl;
2332
5d990b62
CW
2333 if (!pci_acs_enable)
2334 return;
2335
5f4d91a1 2336 if (!pci_is_pcie(dev))
ae21ee65
AK
2337 return;
2338
2339 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2340 if (!pos)
2341 return;
2342
2343 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2344 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2345
2346 /* Source Validation */
2347 ctrl |= (cap & PCI_ACS_SV);
2348
2349 /* P2P Request Redirect */
2350 ctrl |= (cap & PCI_ACS_RR);
2351
2352 /* P2P Completion Redirect */
2353 ctrl |= (cap & PCI_ACS_CR);
2354
2355 /* Upstream Forwarding */
2356 ctrl |= (cap & PCI_ACS_UF);
2357
2358 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2359}
2360
57c2cf71
BH
2361/**
2362 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2363 * @dev: the PCI device
2364 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2365 *
2366 * Perform INTx swizzling for a device behind one level of bridge. This is
2367 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2368 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2369 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2370 * the PCI Express Base Specification, Revision 2.1).
57c2cf71
BH
2371 */
2372u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2373{
46b952a3
MW
2374 int slot;
2375
2376 if (pci_ari_enabled(dev->bus))
2377 slot = 0;
2378 else
2379 slot = PCI_SLOT(dev->devfn);
2380
2381 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2382}
2383
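/*
 * Worked example (not part of the original file): a device in slot 3
 * asserting INTB (pin == 2) behind one bridge is seen by that bridge as
 * (((2 - 1) + 3) % 4) + 1 == 1, i.e. INTA.
 */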
1da177e4
LT
2384int
2385pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2386{
2387 u8 pin;
2388
514d207d 2389 pin = dev->pin;
1da177e4
LT
2390 if (!pin)
2391 return -1;
878f2e50 2392
8784fd4d 2393 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2394 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2395 dev = dev->bus->self;
2396 }
2397 *bridge = dev;
2398 return pin;
2399}
2400
68feac87
BH
2401/**
2402 * pci_common_swizzle - swizzle INTx all the way to root bridge
2403 * @dev: the PCI device
2404 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2405 *
2406 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2407 * bridges all the way up to a PCI root bus.
2408 */
2409u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2410{
2411 u8 pin = *pinp;
2412
1eb39487 2413 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2414 pin = pci_swizzle_interrupt_pin(dev, pin);
2415 dev = dev->bus->self;
2416 }
2417 *pinp = pin;
2418 return PCI_SLOT(dev->devfn);
2419}
2420
1da177e4
LT
2421/**
2422 * pci_release_region - Release a PCI bar
2423 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2424 * @bar: BAR to release
2425 *
2426 * Releases the PCI I/O and memory resources previously reserved by a
2427 * successful call to pci_request_region. Call this function only
2428 * after all use of the PCI regions has ceased.
2429 */
2430void pci_release_region(struct pci_dev *pdev, int bar)
2431{
9ac7849e
TH
2432 struct pci_devres *dr;
2433
1da177e4
LT
2434 if (pci_resource_len(pdev, bar) == 0)
2435 return;
2436 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2437 release_region(pci_resource_start(pdev, bar),
2438 pci_resource_len(pdev, bar));
2439 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2440 release_mem_region(pci_resource_start(pdev, bar),
2441 pci_resource_len(pdev, bar));
9ac7849e
TH
2442
2443 dr = find_pci_dr(pdev);
2444 if (dr)
2445 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2446}
2447
2448/**
f5ddcac4 2449 * __pci_request_region - Reserved PCI I/O and memory resource
1da177e4
LT
2450 * @pdev: PCI device whose resources are to be reserved
2451 * @bar: BAR to be reserved
2452 * @res_name: Name to be associated with resource.
f5ddcac4 2453 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2454 *
2455 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2456 * being reserved by owner @res_name. Do not access any
2457 * address inside the PCI regions unless this call returns
2458 * successfully.
2459 *
f5ddcac4
RD
2460 * If @exclusive is set, then the region is marked so that userspace
2461 * is explicitly not allowed to map the resource via /dev/mem or
2462 * sysfs MMIO access.
2463 *
1da177e4
LT
2464 * Returns 0 on success, or %EBUSY on error. A warning
2465 * message is also printed on failure.
2466 */
e8de1481
AV
2467static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2468 int exclusive)
1da177e4 2469{
9ac7849e
TH
2470 struct pci_devres *dr;
2471
1da177e4
LT
2472 if (pci_resource_len(pdev, bar) == 0)
2473 return 0;
2474
2475 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2476 if (!request_region(pci_resource_start(pdev, bar),
2477 pci_resource_len(pdev, bar), res_name))
2478 goto err_out;
2479 }
2480 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2481 if (!__request_mem_region(pci_resource_start(pdev, bar),
2482 pci_resource_len(pdev, bar), res_name,
2483 exclusive))
1da177e4
LT
2484 goto err_out;
2485 }
9ac7849e
TH
2486
2487 dr = find_pci_dr(pdev);
2488 if (dr)
2489 dr->region_mask |= 1 << bar;
2490
1da177e4
LT
2491 return 0;
2492
2493err_out:
c7dabef8 2494 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2495 &pdev->resource[bar]);
1da177e4
LT
2496 return -EBUSY;
2497}
2498
e8de1481 2499/**
f5ddcac4 2500 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2501 * @pdev: PCI device whose resources are to be reserved
2502 * @bar: BAR to be reserved
f5ddcac4 2503 * @res_name: Name to be associated with resource
e8de1481 2504 *
f5ddcac4 2505 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2506 * being reserved by owner @res_name. Do not access any
2507 * address inside the PCI regions unless this call returns
2508 * successfully.
2509 *
2510 * Returns 0 on success, or %EBUSY on error. A warning
2511 * message is also printed on failure.
2512 */
2513int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2514{
2515 return __pci_request_region(pdev, bar, res_name, 0);
2516}
2517
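/*
 * Example (not part of the original file): the usual claim-then-map pattern
 * for a single BAR.  foo_map_bar0() and the "foo" owner string are
 * hypothetical; pci_ioremap_bar() is the standard helper.
 */
static void __iomem *foo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "foo"))
		return NULL;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		pci_release_region(pdev, 0);
	return regs;
}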
2518/**
2519 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2520 * @pdev: PCI device whose resources are to be reserved
2521 * @bar: BAR to be reserved
2522 * @res_name: Name to be associated with resource.
2523 *
2524 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2525 * being reserved by owner @res_name. Do not access any
2526 * address inside the PCI regions unless this call returns
2527 * successfully.
2528 *
2529 * Returns 0 on success, or %EBUSY on error. A warning
2530 * message is also printed on failure.
2531 *
2532 * The key difference that _exclusive makes is that userspace is
2533 * explicitly not allowed to map the resource via /dev/mem or
2534 * sysfs.
2535 */
2536int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2537{
2538 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2539}
c87deff7
HS
2540/**
2541 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2542 * @pdev: PCI device whose resources were previously reserved
2543 * @bars: Bitmask of BARs to be released
2544 *
2545 * Release selected PCI I/O and memory resources previously reserved.
2546 * Call this function only after all use of the PCI regions has ceased.
2547 */
2548void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2549{
2550 int i;
2551
2552 for (i = 0; i < 6; i++)
2553 if (bars & (1 << i))
2554 pci_release_region(pdev, i);
2555}
2556
e8de1481
AV
2557int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2558 const char *res_name, int excl)
c87deff7
HS
2559{
2560 int i;
2561
2562 for (i = 0; i < 6; i++)
2563 if (bars & (1 << i))
e8de1481 2564 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2565 goto err_out;
2566 return 0;
2567
2568err_out:
2569 while (--i >= 0)
2570 if (bars & (1 << i))
2571 pci_release_region(pdev, i);
2572
2573 return -EBUSY;
2574}
1da177e4 2575
e8de1481
AV
2576
2577/**
2578 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2579 * @pdev: PCI device whose resources are to be reserved
2580 * @bars: Bitmask of BARs to be requested
2581 * @res_name: Name to be associated with resource
2582 */
2583int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2584 const char *res_name)
2585{
2586 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2587}
2588
2589int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2590 int bars, const char *res_name)
2591{
2592 return __pci_request_selected_regions(pdev, bars, res_name,
2593 IORESOURCE_EXCLUSIVE);
2594}
2595
1da177e4
LT
2596/**
2597 * pci_release_regions - Release reserved PCI I/O and memory resources
2598 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2599 *
2600 * Releases all PCI I/O and memory resources previously reserved by a
2601 * successful call to pci_request_regions. Call this function only
2602 * after all use of the PCI regions has ceased.
2603 */
2604
2605void pci_release_regions(struct pci_dev *pdev)
2606{
c87deff7 2607 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2608}
2609
2610/**
2611 * pci_request_regions - Reserve PCI I/O and memory resources
2612 * @pdev: PCI device whose resources are to be reserved
2613 * @res_name: Name to be associated with resource.
2614 *
2615 * Mark all PCI regions associated with PCI device @pdev as
2616 * being reserved by owner @res_name. Do not access any
2617 * address inside the PCI regions unless this call returns
2618 * successfully.
2619 *
2620 * Returns 0 on success, or %EBUSY on error. A warning
2621 * message is also printed on failure.
2622 */
3c990e92 2623int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2624{
c87deff7 2625 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2626}
2627
e8de1481
AV
2628/**
2629 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2630 * @pdev: PCI device whose resources are to be reserved
2631 * @res_name: Name to be associated with resource.
2632 *
2633 * Mark all PCI regions associated with PCI device @pdev as
2634 * being reserved by owner @res_name. Do not access any
2635 * address inside the PCI regions unless this call returns
2636 * successfully.
2637 *
2638 * pci_request_regions_exclusive() will mark the region so that
2639 * /dev/mem and the sysfs MMIO access will not be allowed.
2640 *
2641 * Returns 0 on success, or %EBUSY on error. A warning
2642 * message is also printed on failure.
2643 */
2644int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2645{
2646 return pci_request_selected_regions_exclusive(pdev,
2647 ((1 << 6) - 1), res_name);
2648}
2649
6a479079
BH
2650static void __pci_set_master(struct pci_dev *dev, bool enable)
2651{
2652 u16 old_cmd, cmd;
2653
2654 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2655 if (enable)
2656 cmd = old_cmd | PCI_COMMAND_MASTER;
2657 else
2658 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2659 if (cmd != old_cmd) {
2660 dev_dbg(&dev->dev, "%s bus mastering\n",
2661 enable ? "enabling" : "disabling");
2662 pci_write_config_word(dev, PCI_COMMAND, cmd);
2663 }
2664 dev->is_busmaster = enable;
2665}
e8de1481 2666
96c55900
MS
2667/**
2668 * pcibios_set_master - enable PCI bus-mastering for device dev
2669 * @dev: the PCI device to enable
2670 *
2671 * Enables PCI bus-mastering for the device. This is the default
2672 * implementation. Architecture specific implementations can override
2673 * this if necessary.
2674 */
2675void __weak pcibios_set_master(struct pci_dev *dev)
2676{
2677 u8 lat;
2678
f676678f
MS
2679 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2680 if (pci_is_pcie(dev))
2681 return;
2682
96c55900
MS
2683 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2684 if (lat < 16)
2685 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2686 else if (lat > pcibios_max_latency)
2687 lat = pcibios_max_latency;
2688 else
2689 return;
2690 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2691 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2692}
2693
1da177e4
LT
2694/**
2695 * pci_set_master - enables bus-mastering for device dev
2696 * @dev: the PCI device to enable
2697 *
2698 * Enables bus-mastering on the device and calls pcibios_set_master()
2699 * to do the needed arch specific settings.
2700 */
6a479079 2701void pci_set_master(struct pci_dev *dev)
1da177e4 2702{
6a479079 2703 __pci_set_master(dev, true);
1da177e4
LT
2704 pcibios_set_master(dev);
2705}
2706
6a479079
BH
2707/**
2708 * pci_clear_master - disables bus-mastering for device dev
2709 * @dev: the PCI device to disable
2710 */
2711void pci_clear_master(struct pci_dev *dev)
2712{
2713 __pci_set_master(dev, false);
2714}
2715
1da177e4 2716/**
edb2d97e
MW
2717 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2718 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2719 *
edb2d97e
MW
2720 * Helper function for pci_set_mwi.
2721 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2722 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2723 *
2724 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2725 */
15ea76d4 2726int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2727{
2728 u8 cacheline_size;
2729
2730 if (!pci_cache_line_size)
15ea76d4 2731 return -EINVAL;
1da177e4
LT
2732
2733 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2734 equal to or a multiple of the right value. */
2735 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2736 if (cacheline_size >= pci_cache_line_size &&
2737 (cacheline_size % pci_cache_line_size) == 0)
2738 return 0;
2739
2740 /* Write the correct value. */
2741 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2742 /* Read it back. */
2743 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2744 if (cacheline_size == pci_cache_line_size)
2745 return 0;
2746
80ccba11
BH
2747 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2748 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2749
2750 return -EINVAL;
2751}
15ea76d4
TH
2752EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2753
2754#ifdef PCI_DISABLE_MWI
2755int pci_set_mwi(struct pci_dev *dev)
2756{
2757 return 0;
2758}
2759
2760int pci_try_set_mwi(struct pci_dev *dev)
2761{
2762 return 0;
2763}
2764
2765void pci_clear_mwi(struct pci_dev *dev)
2766{
2767}
2768
2769#else
1da177e4
LT
2770
2771/**
2772 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2773 * @dev: the PCI device for which MWI is enabled
2774 *
694625c0 2775 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2776 *
2777 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2778 */
2779int
2780pci_set_mwi(struct pci_dev *dev)
2781{
2782 int rc;
2783 u16 cmd;
2784
edb2d97e 2785 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2786 if (rc)
2787 return rc;
2788
2789 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2790 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2791 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2792 cmd |= PCI_COMMAND_INVALIDATE;
2793 pci_write_config_word(dev, PCI_COMMAND, cmd);
2794 }
2795
2796 return 0;
2797}
2798
694625c0
RD
2799/**
2800 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2801 * @dev: the PCI device for which MWI is enabled
2802 *
2803 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2804 * Callers are not required to check the return value.
2805 *
2806 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2807 */
2808int pci_try_set_mwi(struct pci_dev *dev)
2809{
2810 int rc = pci_set_mwi(dev);
2811 return rc;
2812}
2813
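/*
 * Example (not part of the original file): MWI is an optimization rather
 * than a requirement, so probe paths typically use the fire-and-forget
 * variant and ignore the result.
 */
pci_try_set_mwi(pdev);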
1da177e4
LT
2814/**
2815 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2816 * @dev: the PCI device to disable
2817 *
2818 * Disables PCI Memory-Write-Invalidate transaction on the device
2819 */
2820void
2821pci_clear_mwi(struct pci_dev *dev)
2822{
2823 u16 cmd;
2824
2825 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2826 if (cmd & PCI_COMMAND_INVALIDATE) {
2827 cmd &= ~PCI_COMMAND_INVALIDATE;
2828 pci_write_config_word(dev, PCI_COMMAND, cmd);
2829 }
2830}
edb2d97e 2831#endif /* ! PCI_DISABLE_MWI */
1da177e4 2832
a04ce0ff
BR
2833/**
2834 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2835 * @pdev: the PCI device to operate on
2836 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2837 *
2838 * Enables/disables PCI INTx for device dev
2839 */
2840void
2841pci_intx(struct pci_dev *pdev, int enable)
2842{
2843 u16 pci_command, new;
2844
2845 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2846
2847 if (enable) {
2848 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2849 } else {
2850 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2851 }
2852
2853 if (new != pci_command) {
9ac7849e
TH
2854 struct pci_devres *dr;
2855
2fd9d74b 2856 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
2857
2858 dr = find_pci_dr(pdev);
2859 if (dr && !dr->restore_intx) {
2860 dr->restore_intx = 1;
2861 dr->orig_intx = !enable;
2862 }
a04ce0ff
BR
2863 }
2864}
2865
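/*
 * Example (not part of the original file): drivers that switch to MSI often
 * disable legacy INTx assertion explicitly; the second argument is a
 * boolean-style int, so 0 sets the INTx Disable bit.
 */
pci_intx(pdev, 0);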
a2e27787
JK
2866/**
2867 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 2868 * @dev: the PCI device to operate on
a2e27787
JK
2869 *
2870 * Check if the device dev supports INTx masking via the config space
2871 * command word.
2872 */
2873bool pci_intx_mask_supported(struct pci_dev *dev)
2874{
2875 bool mask_supported = false;
2876 u16 orig, new;
2877
2878 pci_cfg_access_lock(dev);
2879
2880 pci_read_config_word(dev, PCI_COMMAND, &orig);
2881 pci_write_config_word(dev, PCI_COMMAND,
2882 orig ^ PCI_COMMAND_INTX_DISABLE);
2883 pci_read_config_word(dev, PCI_COMMAND, &new);
2884
2885 /*
2886 * There's no way to protect against hardware bugs or detect them
2887 * reliably, but as long as we know what the value should be, let's
2888 * go ahead and check it.
2889 */
2890 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2891 dev_err(&dev->dev, "Command register changed from "
2892 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2893 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2894 mask_supported = true;
2895 pci_write_config_word(dev, PCI_COMMAND, orig);
2896 }
2897
2898 pci_cfg_access_unlock(dev);
2899 return mask_supported;
2900}
2901EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2902
2903static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2904{
2905 struct pci_bus *bus = dev->bus;
2906 bool mask_updated = true;
2907 u32 cmd_status_dword;
2908 u16 origcmd, newcmd;
2909 unsigned long flags;
2910 bool irq_pending;
2911
2912 /*
2913 * We do a single dword read to retrieve both command and status.
2914 * Document assumptions that make this possible.
2915 */
2916 BUILD_BUG_ON(PCI_COMMAND % 4);
2917 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2918
2919 raw_spin_lock_irqsave(&pci_lock, flags);
2920
2921 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2922
2923 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2924
2925 /*
2926 * Check interrupt status register to see whether our device
2927 * triggered the interrupt (when masking) or the next IRQ is
2928 * already pending (when unmasking).
2929 */
2930 if (mask != irq_pending) {
2931 mask_updated = false;
2932 goto done;
2933 }
2934
2935 origcmd = cmd_status_dword;
2936 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2937 if (mask)
2938 newcmd |= PCI_COMMAND_INTX_DISABLE;
2939 if (newcmd != origcmd)
2940 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2941
2942done:
2943 raw_spin_unlock_irqrestore(&pci_lock, flags);
2944
2945 return mask_updated;
2946}
2947
2948/**
2949 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 2950 * @dev: the PCI device to operate on
a2e27787
JK
2951 *
2952 * Check if the device dev has its INTx line asserted, mask it and
2953 * return true in that case. False is returned if no interrupt was
2954 * pending.
2955 */
2956bool pci_check_and_mask_intx(struct pci_dev *dev)
2957{
2958 return pci_check_and_set_intx_mask(dev, true);
2959}
2960EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2961
2962/**
2963 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 2964 * @dev: the PCI device to operate on
a2e27787
JK
2965 *
2966 * Check if the device dev has its INTx line asserted, unmask it if not
2967 * and return true. False is returned and the mask remains active if
2968 * there was still an interrupt pending.
2969 */
2970bool pci_check_and_unmask_intx(struct pci_dev *dev)
2971{
2972 return pci_check_and_set_intx_mask(dev, false);
2973}
2974EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2975
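/*
 * Example (not part of the original file): a sketch of the hard half of a
 * shared-INTx handler built on these helpers, in the style of device
 * assignment frameworks.  foo_irq() is hypothetical; a matching thread (or
 * guest) would service the device and later unmask via
 * pci_check_and_unmask_intx().
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Mask only if our device actually asserted the line. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* someone else's interrupt */

	return IRQ_WAKE_THREAD;	/* service and unmask elsewhere */
}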
f5f2b131
EB
2976/**
2977 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 2978 * @dev: the PCI device to operate on
f5f2b131
EB
2979 *
2980 * If you want to use MSI, see pci_enable_msi() and friends.
2981 * This is a lower-level primitive that allows us to disable
2982 * MSI operation at the device level.
2983 */
2984void pci_msi_off(struct pci_dev *dev)
2985{
2986 int pos;
2987 u16 control;
2988
2989 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2990 if (pos) {
2991 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2992 control &= ~PCI_MSI_FLAGS_ENABLE;
2993 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2994 }
2995 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2996 if (pos) {
2997 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2998 control &= ~PCI_MSIX_FLAGS_ENABLE;
2999 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3000 }
3001}
b03214d5 3002EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3003
4d57cdfa
FT
3004int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3005{
3006 return dma_set_max_seg_size(&dev->dev, size);
3007}
3008EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3009
59fc67de
FT
3010int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3011{
3012 return dma_set_seg_boundary(&dev->dev, mask);
3013}
3014EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3015
8c1c699f 3016static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3017{
8c1c699f
YZ
3018 int i;
3019 int pos;
8dd7f803 3020 u32 cap;
04b55c47 3021 u16 status, control;
8dd7f803 3022
06a1cbaf 3023 pos = pci_pcie_cap(dev);
8c1c699f 3024 if (!pos)
8dd7f803 3025 return -ENOTTY;
8c1c699f
YZ
3026
3027 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3028 if (!(cap & PCI_EXP_DEVCAP_FLR))
3029 return -ENOTTY;
3030
d91cdc74
SY
3031 if (probe)
3032 return 0;
3033
8dd7f803 3034 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3035 for (i = 0; i < 4; i++) {
3036 if (i)
3037 msleep((1 << (i - 1)) * 100);
5fe5db05 3038
8c1c699f
YZ
3039 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3040 if (!(status & PCI_EXP_DEVSTA_TRPND))
3041 goto clear;
3042 }
3043
3044 dev_err(&dev->dev, "transaction is not cleared; "
3045 "proceeding with reset anyway\n");
3046
3047clear:
04b55c47
SR
3048 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3049 control |= PCI_EXP_DEVCTL_BCR_FLR;
3050 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3051
8c1c699f 3052 msleep(100);
8dd7f803 3053
8dd7f803
SY
3054 return 0;
3055}
d91cdc74 3056
8c1c699f 3057static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3058{
8c1c699f
YZ
3059 int i;
3060 int pos;
1ca88797 3061 u8 cap;
8c1c699f 3062 u8 status;
1ca88797 3063
8c1c699f
YZ
3064 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3065 if (!pos)
1ca88797 3066 return -ENOTTY;
8c1c699f
YZ
3067
3068 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3069 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3070 return -ENOTTY;
3071
3072 if (probe)
3073 return 0;
3074
1ca88797 3075 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3076 for (i = 0; i < 4; i++) {
3077 if (i)
3078 msleep((1 << (i - 1)) * 100);
3079
3080 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3081 if (!(status & PCI_AF_STATUS_TP))
3082 goto clear;
3083 }
5fe5db05 3084
8c1c699f
YZ
3085 dev_err(&dev->dev, "transaction is not cleared; "
3086 "proceeding with reset anyway\n");
5fe5db05 3087
8c1c699f
YZ
3088clear:
3089 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3090 msleep(100);
8c1c699f 3091
1ca88797
SY
3092 return 0;
3093}
3094
83d74e03
RW
3095/**
3096 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3097 * @dev: Device to reset.
3098 * @probe: If set, only check if the device can be reset this way.
3099 *
3100 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3101 * unset, it will be reinitialized internally when going from PCI_D3hot to
3102 * PCI_D0. If that's the case and the device is not in a low-power state
3103 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3104 *
3105 * NOTE: This causes the caller to sleep for twice the device power transition
3106 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3107 * by default (i.e. unless the @dev's d3_delay field has a different value).
3108 * Moreover, only devices in D0 can be reset by this function.
3109 */
f85876ba 3110static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3111{
f85876ba
YZ
3112 u16 csr;
3113
3114 if (!dev->pm_cap)
3115 return -ENOTTY;
d91cdc74 3116
f85876ba
YZ
3117 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3118 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3119 return -ENOTTY;
d91cdc74 3120
f85876ba
YZ
3121 if (probe)
3122 return 0;
1ca88797 3123
f85876ba
YZ
3124 if (dev->current_state != PCI_D0)
3125 return -EINVAL;
3126
3127 csr &= ~PCI_PM_CTRL_STATE_MASK;
3128 csr |= PCI_D3hot;
3129 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3130 pci_dev_d3_sleep(dev);
f85876ba
YZ
3131
3132 csr &= ~PCI_PM_CTRL_STATE_MASK;
3133 csr |= PCI_D0;
3134 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3135 pci_dev_d3_sleep(dev);
f85876ba
YZ
3136
3137 return 0;
3138}
3139
c12ff1df
YZ
3140static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3141{
3142 u16 ctrl;
3143 struct pci_dev *pdev;
3144
654b75e0 3145 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3146 return -ENOTTY;
3147
3148 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3149 if (pdev != dev)
3150 return -ENOTTY;
3151
3152 if (probe)
3153 return 0;
3154
3155 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3156 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3157 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3158 msleep(100);
3159
3160 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3161 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3162 msleep(100);
3163
3164 return 0;
3165}
3166
8c1c699f 3167static int pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3168{
8c1c699f
YZ
3169 int rc;
3170
3171 might_sleep();
3172
3173 if (!probe) {
fb51ccbf 3174 pci_cfg_access_lock(dev);
8c1c699f 3175 /* block PM suspend, driver probe, etc. */
8e9394ce 3176 device_lock(&dev->dev);
8c1c699f 3177 }
d91cdc74 3178
b9c3b266
DC
3179 rc = pci_dev_specific_reset(dev, probe);
3180 if (rc != -ENOTTY)
3181 goto done;
3182
8c1c699f
YZ
3183 rc = pcie_flr(dev, probe);
3184 if (rc != -ENOTTY)
3185 goto done;
d91cdc74 3186
8c1c699f 3187 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3188 if (rc != -ENOTTY)
3189 goto done;
3190
3191 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3192 if (rc != -ENOTTY)
3193 goto done;
3194
3195 rc = pci_parent_bus_reset(dev, probe);
8c1c699f
YZ
3196done:
3197 if (!probe) {
8e9394ce 3198 device_unlock(&dev->dev);
fb51ccbf 3199 pci_cfg_access_unlock(dev);
8c1c699f 3200 }
1ca88797 3201
8c1c699f 3202 return rc;
d91cdc74
SY
3203}
3204
3205/**
8c1c699f
YZ
3206 * __pci_reset_function - reset a PCI device function
3207 * @dev: PCI device to reset
d91cdc74
SY
3208 *
3209 * Some devices allow an individual function to be reset without affecting
3210 * other functions in the same device. The PCI device must be responsive
3211 * to PCI config space in order to use this function.
3212 *
3213 * The device function is presumed to be unused when this function is called.
3214 * Resetting the device will make the contents of PCI configuration space
3215 * random, so any caller of this must be prepared to reinitialise the
3216 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3217 * etc.
3218 *
8c1c699f 3219 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3220 * device doesn't support resetting a single function.
3221 */
8c1c699f 3222int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3223{
8c1c699f 3224 return pci_dev_reset(dev, 0);
d91cdc74 3225}
8c1c699f 3226EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3227
6fbf9e7a
KRW
3228/**
3229 * __pci_reset_function_locked - reset a PCI device function while holding
3230 * the @dev mutex lock.
3231 * @dev: PCI device to reset
3232 *
3233 * Some devices allow an individual function to be reset without affecting
3234 * other functions in the same device. The PCI device must be responsive
3235 * to PCI config space in order to use this function.
3236 *
3237 * The device function is presumed to be unused and the caller is holding
3238 * the device mutex lock when this function is called.
3239 * Resetting the device will make the contents of PCI configuration space
3240 * random, so any caller of this must be prepared to reinitialise the
3241 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3242 * etc.
3243 *
3244 * Returns 0 if the device function was successfully reset or negative if the
3245 * device doesn't support resetting a single function.
3246 */
3247int __pci_reset_function_locked(struct pci_dev *dev)
3248{
3249 return pci_dev_reset(dev, 1);
3250}
3251EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3252
711d5779
MT
3253/**
3254 * pci_probe_reset_function - check whether the device can be safely reset
3255 * @dev: PCI device to reset
3256 *
3257 * Some devices allow an individual function to be reset without affecting
3258 * other functions in the same device. The PCI device must be responsive
3259 * to PCI config space in order to use this function.
3260 *
3261 * Returns 0 if the device function can be reset or negative if the
3262 * device doesn't support resetting a single function.
3263 */
3264int pci_probe_reset_function(struct pci_dev *dev)
3265{
3266 return pci_dev_reset(dev, 1);
3267}
3268
8dd7f803 3269/**
8c1c699f
YZ
3270 * pci_reset_function - quiesce and reset a PCI device function
3271 * @dev: PCI device to reset
8dd7f803
SY
3272 *
3273 * Some devices allow an individual function to be reset without affecting
3274 * other functions in the same device. The PCI device must be responsive
3275 * to PCI config space in order to use this function.
3276 *
3277 * This function does not just reset the PCI portion of a device, but
3278 * clears all the state associated with the device. This function differs
8c1c699f 3279 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3280 * over the reset.
3281 *
8c1c699f 3282 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3283 * device doesn't support resetting a single function.
3284 */
3285int pci_reset_function(struct pci_dev *dev)
3286{
8c1c699f 3287 int rc;
8dd7f803 3288
8c1c699f
YZ
3289 rc = pci_dev_reset(dev, 1);
3290 if (rc)
3291 return rc;
8dd7f803 3292
8dd7f803
SY
3293 pci_save_state(dev);
3294
8c1c699f
YZ
3295 /*
3296 * both INTx and MSI are disabled after the Interrupt Disable bit
3297 * is set and the Bus Master bit is cleared.
3298 */
8dd7f803
SY
3299 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3300
8c1c699f 3301 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3302
3303 pci_restore_state(dev);
8dd7f803 3304
8c1c699f 3305 return rc;
8dd7f803
SY
3306}
3307EXPORT_SYMBOL_GPL(pci_reset_function);
3308
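/*
 * Example (not part of the original file): resetting a quiesced function,
 * e.g. before handing it to a new owner.  pci_reset_function() saves and
 * restores config state around whichever reset method probes successfully.
 */
if (pci_reset_function(pdev))
	dev_warn(&pdev->dev, "no usable function reset method\n");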
d556ad4b
PO
3309/**
3310 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3311 * @dev: PCI device to query
3312 *
3313 * Returns mmrbc: maximum designed memory read count in bytes
3314 * or appropriate error value.
3315 */
3316int pcix_get_max_mmrbc(struct pci_dev *dev)
3317{
7c9e2b1c 3318 int cap;
d556ad4b
PO
3319 u32 stat;
3320
3321 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3322 if (!cap)
3323 return -EINVAL;
3324
7c9e2b1c 3325 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3326 return -EINVAL;
3327
25daeb55 3328 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3329}
3330EXPORT_SYMBOL(pcix_get_max_mmrbc);
3331
3332/**
3333 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3334 * @dev: PCI device to query
3335 *
3336 * Returns mmrbc: maximum memory read count in bytes
3337 * or appropriate error value.
3338 */
3339int pcix_get_mmrbc(struct pci_dev *dev)
3340{
7c9e2b1c 3341 int cap;
bdc2bda7 3342 u16 cmd;
d556ad4b
PO
3343
3344 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3345 if (!cap)
3346 return -EINVAL;
3347
7c9e2b1c
DN
3348 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3349 return -EINVAL;
d556ad4b 3350
7c9e2b1c 3351 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3352}
3353EXPORT_SYMBOL(pcix_get_mmrbc);
3354
3355/**
3356 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3357 * @dev: PCI device to query
3358 * @mmrbc: maximum memory read count in bytes
3359 * valid values are 512, 1024, 2048, 4096
3360 *
3361 * If possible sets maximum memory read byte count; some bridges have errata
3362 * that prevent this.
3363 */
3364int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3365{
7c9e2b1c 3366 int cap;
bdc2bda7
DN
3367 u32 stat, v, o;
3368 u16 cmd;
d556ad4b 3369
229f5afd 3370 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3371 return -EINVAL;
d556ad4b
PO
3372
3373 v = ffs(mmrbc) - 10;
3374
3375 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3376 if (!cap)
7c9e2b1c 3377 return -EINVAL;
d556ad4b 3378
7c9e2b1c
DN
3379 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3380 return -EINVAL;
d556ad4b
PO
3381
3382 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3383 return -E2BIG;
3384
7c9e2b1c
DN
3385 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3386 return -EINVAL;
d556ad4b
PO
3387
3388 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3389 if (o != v) {
3390 if (v > o && dev->bus &&
3391 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3392 return -EIO;
3393
3394 cmd &= ~PCI_X_CMD_MAX_READ;
3395 cmd |= v << 2;
7c9e2b1c
DN
3396 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3397 return -EIO;
d556ad4b 3398 }
7c9e2b1c 3399 return 0;
d556ad4b
PO
3400}
3401EXPORT_SYMBOL(pcix_set_mmrbc);
3402
3403/**
3404 * pcie_get_readrq - get PCI Express read request size
3405 * @dev: PCI device to query
3406 *
3407 * Returns maximum memory read request in bytes
3408 * or appropriate error value.
3409 */
3410int pcie_get_readrq(struct pci_dev *dev)
3411{
3412 int ret, cap;
3413 u16 ctl;
3414
06a1cbaf 3415 cap = pci_pcie_cap(dev);
d556ad4b
PO
3416 if (!cap)
3417 return -EINVAL;
3418
3419 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3420 if (!ret)
93e75fab 3421 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3422
3423 return ret;
3424}
3425EXPORT_SYMBOL(pcie_get_readrq);
3426
3427/**
3428 * pcie_set_readrq - set PCI Express maximum memory read request
3429 * @dev: PCI device to query
42e61f4a 3430 * @rq: maximum memory read count in bytes
d556ad4b
PO
3431 * valid values are 128, 256, 512, 1024, 2048, 4096
3432 *
c9b378c7 3433 * If possible sets maximum memory read request in bytes
d556ad4b
PO
3434 */
3435int pcie_set_readrq(struct pci_dev *dev, int rq)
3436{
3437 int cap, err = -EINVAL;
3438 u16 ctl, v;
3439
229f5afd 3440 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3441 goto out;
3442
06a1cbaf 3443 cap = pci_pcie_cap(dev);
d556ad4b
PO
3444 if (!cap)
3445 goto out;
3446
3447 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3448 if (err)
3449 goto out;
a1c473aa
BH
3450 /*
3451 * If using the "performance" PCIe config, we clamp the
3452 * read rq size to the max packet size to prevent the
3453 * host bridge generating requests larger than we can
3454 * cope with
3455 */
3456 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3457 int mps = pcie_get_mps(dev);
3458
3459 if (mps < 0)
3460 return mps;
3461 if (mps < rq)
3462 rq = mps;
3463 }
3464
3465 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3466
3467 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3468 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3469 ctl |= v;
c9b378c7 3470 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3471 }
3472
3473out:
3474 return err;
3475}
3476EXPORT_SYMBOL(pcie_set_readrq);
3477
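/*
 * Example (not part of the original file): lowering the maximum read
 * request size at probe time; 512 bytes is an illustrative value for a
 * device with modest internal buffering.
 */
if (pcie_set_readrq(pdev, 512))
	dev_warn(&pdev->dev, "failed to set read request size\n");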
b03e7495
JM
3478/**
3479 * pcie_get_mps - get PCI Express maximum payload size
3480 * @dev: PCI device to query
3481 *
3482 * Returns maximum payload size in bytes
3483 * or appropriate error value.
3484 */
3485int pcie_get_mps(struct pci_dev *dev)
3486{
3487 int ret, cap;
3488 u16 ctl;
3489
3490 cap = pci_pcie_cap(dev);
3491 if (!cap)
3492 return -EINVAL;
3493
3494 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3495 if (!ret)
3496 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3497
3498 return ret;
3499}
3500
3501/**
3502 * pcie_set_mps - set PCI Express maximum payload size
3503 * @dev: PCI device to query
47c08f31 3504 * @mps: maximum payload size in bytes
b03e7495
JM
3505 * valid values are 128, 256, 512, 1024, 2048, 4096
3506 *
3507 * If possible sets maximum payload size
3508 */
3509int pcie_set_mps(struct pci_dev *dev, int mps)
3510{
3511 int cap, err = -EINVAL;
3512 u16 ctl, v;
3513
3514 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3515 goto out;
3516
3517 v = ffs(mps) - 8;
3518 if (v > dev->pcie_mpss)
3519 goto out;
3520 v <<= 5;
3521
3522 cap = pci_pcie_cap(dev);
3523 if (!cap)
3524 goto out;
3525
3526 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3527 if (err)
3528 goto out;
3529
3530 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3531 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3532 ctl |= v;
3533 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3534 }
3535out:
3536 return err;
3537}
3538
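/*
 * Example (not part of the original file): reading back the payload size
 * negotiated by the bus code, e.g. to size DMA bursts.  Drivers should
 * rarely, if ever, call pcie_set_mps() themselves.
 */
int mps = pcie_get_mps(pdev);	/* 128..4096 bytes, or -EINVAL */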
c87deff7
HS
3539/**
3540 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3541 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3542 * @flags: resource type mask to be selected
3543 *
3544 * This helper routine makes a BAR mask from the type of resource.
3545 */
3546int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3547{
3548 int i, bars = 0;
3549 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3550 if (pci_resource_flags(dev, i) & flags)
3551 bars |= (1 << i);
3552 return bars;
3553}
3554
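/*
 * Example (not part of the original file): pci_select_bars() pairs
 * naturally with pci_request_selected_regions() to claim only the memory
 * BARs of a device; the "foo" owner string is hypothetical.
 */
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
int err = pci_request_selected_regions(pdev, bars, "foo");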
613e7ed6
YZ
3555/**
3556 * pci_resource_bar - get position of the BAR associated with a resource
3557 * @dev: the PCI device
3558 * @resno: the resource number
3559 * @type: the BAR type to be filled in
3560 *
3561 * Returns BAR position in config space, or 0 if the BAR is invalid.
3562 */
3563int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3564{
d1b054da
YZ
3565 int reg;
3566
613e7ed6
YZ
3567 if (resno < PCI_ROM_RESOURCE) {
3568 *type = pci_bar_unknown;
3569 return PCI_BASE_ADDRESS_0 + 4 * resno;
3570 } else if (resno == PCI_ROM_RESOURCE) {
3571 *type = pci_bar_mem32;
3572 return dev->rom_base_reg;
d1b054da
YZ
3573 } else if (resno < PCI_BRIDGE_RESOURCES) {
3574 /* device specific resource */
3575 reg = pci_iov_resource_bar(dev, resno, type);
3576 if (reg)
3577 return reg;
613e7ed6
YZ
3578 }
3579
865df576 3580 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3581 return 0;
3582}
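
/*
 * Worked example of the mapping above (added comment): resno == 2
 * returns PCI_BASE_ADDRESS_0 + 8, i.e. config offset 0x18 (BAR2),
 * since the six standard BARs live at offsets 0x10, 0x14, ..., 0x24.
 */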

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits, flags);

	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
 *    the latter also traverses ancestors and changes bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	/* command_bits may only contain the I/O and memory enables */
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
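
/*
 * Usage sketch (hypothetical helper, not in the original file): route
 * legacy VGA to a device the way a VGA arbiter might, enabling its
 * I/O and memory decode and the VGA enable bit on every bridge above.
 */
static int example_route_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}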

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get the alignment for
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			   &seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
				   &bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
				       p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
		    bus == dev->bus->number &&
		    slot == PCI_SLOT(dev->devfn) &&
		    func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
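
/*
 * Format accepted by the parser above (device addresses hypothetical):
 *
 *	pci=resource_alignment=20@0000:00:02.0;12@03:00.1
 *
 * i.e. "[<order>@][<domain>:]<bus>:<slot>.<func>" entries separated
 * by ';' or ','; a missing order falls back to PAGE_SIZE alignment.
 */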

/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the PCI device is a target device to reassign;
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by the kernel's boot parameter
 * 'pci=resource_alignment='.  It also rounds up the resource size to
 * the specified alignment.  Later on, the kernel will assign
 * page-aligned memory resources back to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			 "Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		 "Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				 "Rounding up size of resource #%d to %#llx.\n",
				 i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/*
	 * Need to disable the bridge's resource window so that the
	 * kernel can reassign a new resource window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
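
/*
 * Example of the effect above (hypothetical numbers): with
 * "pci=resource_alignment=20@03:00.0", a 16K memory BAR of device
 * 03:00.0 is grown to 1M (1 << 20) and rebased to zero, so the
 * allocator should later place it with at least 1M alignment.
 */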

ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}

ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;

	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					    const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	 pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type, &bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
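
/*
 * The bus attribute above exposes the same parameter at run time,
 * e.g. (illustrative shell session):
 *
 *	# echo 20@0000:00:02.0 > /sys/bus/pci/resource_alignment
 *	# cat /sys/bus/pci/resource_alignment
 */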

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
				       str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
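
/*
 * Example command line exercising several of the options parsed
 * above (values illustrative):
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=12@03:00.0
 */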

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);