/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
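
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * sizing a root bus's number range might do:
 *
 *	unsigned char last = pci_bus_max_busnr(bus);
 *	dev_info(&bus->dev, "buses %02x-%02x\n", bus->number, last);
 */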

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

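/*
 * Usage sketch (illustrative, not part of the original file; "foo_probe"
 * is a hypothetical driver probe that maps BAR 0):
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int rc = pci_enable_device_mem(pdev);
 *
 *		if (rc)
 *			return rc;
 *		regs = pci_ioremap_bar(pdev, 0);
 *		if (!regs)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */
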
#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

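/*
 * Usage sketch (illustrative, not part of the original file): locating a
 * device's power management registers via the capability list:
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */
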
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
 * @dev: PCI device to check
 *
 * Like pci_pcie_cap() but also checks that the PCIe capability version is
 * >= 2.  Note that v1 capability structures could be sparse in that not
 * all register fields were required.  v2 requires the entire structure to
 * be present size-wise, while still allowing for non-implemented registers
 * to exist, but they must be hardwired to 0.
 *
 * Due to the differences in the versions of capability structures, one
 * must be careful not to try and access non-existent registers that may
 * exist in early versions - v1 - of Express devices.
 *
 * Returns the offset of the PCIe capability structure as long as the
 * capability version is >= 2; otherwise 0 is returned.
 */
static int pci_pcie_cap2(struct pci_dev *dev)
{
	u16 flags;
	int pos;

	pos = pci_pcie_cap(dev);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
		if ((flags & PCI_EXP_FLAGS_VERS) < 2)
			pos = 0;
	}

	return pos;
}

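/*
 * Usage sketch (illustrative, not part of the original file): v2-only
 * registers such as Device Control 2 should only be touched when
 * pci_pcie_cap2() reports a v2 structure:
 *
 *	int pos2 = pci_pcie_cap2(dev);
 *	u16 ctl2;
 *
 *	if (pos2)
 *		pci_read_config_word(dev, pos2 + PCI_EXP_DEVCTL2, &ctl2);
 */
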
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

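/*
 * Usage sketch (illustrative, not part of the original file): probing for
 * the Advanced Error Reporting extended capability:
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		dev_info(&pdev->dev, "AER capability at offset %#x\n", aer);
 */
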
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap.  @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for a device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for devices
		 * powered on/off by the corresponding bridge, because
		 * we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into the
			 * D0uninitialized state; resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put the device into D3hot in the
	 * native way, then put the device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

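/*
 * Usage sketch (illustrative, not part of the original file; "foo_suspend"
 * is a hypothetical legacy suspend hook): drivers typically save state,
 * disable the device and then drop to a low power state:
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */
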
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

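/*
 * Usage sketch (illustrative, not part of the original file): rather than
 * hard-coding D3hot, a suspend hook can ask for the recommended state:
 *
 *	pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 */
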
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int type, pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	type = pci_pcie_type(dev);
	if (pcie_cap_has_devctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);

	pos = pci_pcie_cap2(dev);
	if (!pos)
		return 0;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos, type;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	type = pci_pcie_type(dev);
	if (pcie_cap_has_devctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);

	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

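/*
 * Usage sketch (illustrative, not part of the original file; "foo_resume"
 * is a hypothetical resume hook mirroring the suspend sketch above):
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */
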
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

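/*
 * Usage sketch (illustrative, not part of the original file): snapshotting
 * config space around a reset that the device will not retain settings
 * across:
 *
 *	struct pci_saved_state *snap;
 *
 *	pci_save_state(pdev);
 *	snap = pci_store_saved_state(pdev);
 *	(... reset the device here ...)
 *	pci_load_and_free_saved_state(pdev, &snap);
 *	pci_restore_state(pdev);
 */
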
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable Memory resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O and memory.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

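/*
 * Note (illustrative, not part of the original file): enabling is
 * reference counted via enable_cnt, so enable/disable calls must balance:
 *
 *	pci_enable_device(pdev);	enable_cnt 0 -> 1, device enabled
 *	pci_enable_device(pdev);	enable_cnt 1 -> 2, no-op
 *	pci_disable_device(pdev);	enable_cnt 2 -> 1, still enabled
 *	pci_disable_device(pdev);	enable_cnt 1 -> 0, device disabled
 */
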
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using the managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

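/*
 * Usage sketch (illustrative, not part of the original file): with the
 * managed variant no explicit pci_disable_device() is needed on the error
 * or detach paths, because devres undoes the enable on driver unbind:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		return 0;
 *	}
 */
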
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device.  This
 * is the default implementation.  Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If the bridge is in a low power state, the
				 * configuration space of subordinate devices
				 * may not be accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly.  Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep.  The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set.  The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
				  platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

0235c4fc
RW
1740/**
1741 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1742 * @dev: PCI device to prepare
1743 * @enable: True to enable wake-up event generation; false to disable
1744 *
1745 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1746 * and this function allows them to set that up cleanly - pci_enable_wake()
1747 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1748 * ordering constraints.
1749 *
1750 * This function only returns an error code if the device is not capable of
1751 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1752 * enable wake-up power for it.
1753 */
1754int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1755{
1756 return pci_pme_capable(dev, PCI_D3cold) ?
1757 pci_enable_wake(dev, PCI_D3cold, enable) :
1758 pci_enable_wake(dev, PCI_D3hot, enable);
1759}
1760
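/*
 * Illustrative sketch (not part of the original file): a driver wanting its
 * device to wake the system from D3 could call pci_wake_from_d3() from its
 * legacy suspend hook; "foo_suspend" and the surrounding logic are
 * assumptions made for the example.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}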
404cc2d8 1761/**
37139074
JB
1762 * pci_target_state - find an appropriate low power state for a given PCI dev
1763 * @dev: PCI device
1764 *
1765 * Use underlying platform code to find a supported low power state for @dev.
1766 * If the platform can't manage @dev, return the deepest state from which it
1767 * can generate wake events, based on any available PME info.
404cc2d8 1768 */
e5899e1b 1769pci_power_t pci_target_state(struct pci_dev *dev)
404cc2d8
RW
1770{
1771 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
1772
1773 if (platform_pci_power_manageable(dev)) {
1774 /*
1775 * Call the platform to choose the target state of the device
1776 * and enable wake-up from this state if supported.
1777 */
1778 pci_power_t state = platform_pci_choose_state(dev);
1779
1780 switch (state) {
1781 case PCI_POWER_ERROR:
1782 case PCI_UNKNOWN:
1783 break;
1784 case PCI_D1:
1785 case PCI_D2:
1786 if (pci_no_d1d2(dev))
1787 break;
1788 default:
1789 target_state = state;
404cc2d8 1790 }
d2abdf62
RW
1791 } else if (!dev->pm_cap) {
1792 target_state = PCI_D0;
404cc2d8
RW
1793 } else if (device_may_wakeup(&dev->dev)) {
1794 /*
1795 * Find the deepest state from which the device can generate
1796 * wake-up events, make it the target state and enable device
1797 * to generate PME#.
1798 */
337001b6
RW
1799 if (dev->pme_support) {
1800 while (target_state
1801 && !(dev->pme_support & (1 << target_state)))
1802 target_state--;
404cc2d8
RW
1803 }
1804 }
1805
e5899e1b
RW
1806 return target_state;
1807}
1808
1809/**
1810 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1811 * @dev: Device to handle.
1812 *
1813 * Choose the power state appropriate for the device depending on whether
1814 * it can wake up the system and/or is power manageable by the platform
1815 * (PCI_D3hot is the default) and put the device into that state.
1816 */
1817int pci_prepare_to_sleep(struct pci_dev *dev)
1818{
1819 pci_power_t target_state = pci_target_state(dev);
1820 int error;
1821
1822 if (target_state == PCI_POWER_ERROR)
1823 return -EIO;
1824
448bd857
HY
1825 /* D3cold during system suspend/hibernate is not supported */
1826 if (target_state > PCI_D3hot)
1827 target_state = PCI_D3hot;
1828
8efb8c76 1829 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
c157dfa3 1830
404cc2d8
RW
1831 error = pci_set_power_state(dev, target_state);
1832
1833 if (error)
1834 pci_enable_wake(dev, target_state, false);
1835
1836 return error;
1837}
1838
1839/**
443bd1c4 1840 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
1841 * @dev: Device to handle.
1842 *
88393161 1843 * Disable device's system wake-up capability and put it into D0.
404cc2d8
RW
1844 */
1845int pci_back_from_sleep(struct pci_dev *dev)
1846{
1847 pci_enable_wake(dev, PCI_D0, false);
1848 return pci_set_power_state(dev, PCI_D0);
1849}
1850
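/*
 * Illustrative sketch (not part of the original file): pci_prepare_to_sleep()
 * and pci_back_from_sleep() are meant to be paired across a system
 * suspend/resume cycle; the "foo_" helpers are hypothetical.
 */
static int foo_suspend_noirq(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume_noirq(struct pci_dev *pdev)
{
	int error = pci_back_from_sleep(pdev);

	pci_restore_state(pdev);
	return error;
}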
6cbf8214
RW
1851/**
1852 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1853 * @dev: PCI device being suspended.
1854 *
1855 * Prepare @dev to generate wake-up events at run time and put it into a low
1856 * power state.
1857 */
1858int pci_finish_runtime_suspend(struct pci_dev *dev)
1859{
1860 pci_power_t target_state = pci_target_state(dev);
1861 int error;
1862
1863 if (target_state == PCI_POWER_ERROR)
1864 return -EIO;
1865
448bd857
HY
1866 dev->runtime_d3cold = target_state == PCI_D3cold;
1867
6cbf8214
RW
1868 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1869
1870 error = pci_set_power_state(dev, target_state);
1871
448bd857 1872 if (error) {
6cbf8214 1873 __pci_enable_wake(dev, target_state, true, false);
448bd857
HY
1874 dev->runtime_d3cold = false;
1875 }
6cbf8214
RW
1876
1877 return error;
1878}
1879
b67ea761
RW
1880/**
1881 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1882 * @dev: Device to check.
1883 *
1884 * Return true if the device itself is capable of generating wake-up events
1885 * (through the platform or using the native PCIe PME) or if the device supports
1886 * PME and one of its upstream bridges can generate wake-up events.
1887 */
1888bool pci_dev_run_wake(struct pci_dev *dev)
1889{
1890 struct pci_bus *bus = dev->bus;
1891
1892 if (device_run_wake(&dev->dev))
1893 return true;
1894
1895 if (!dev->pme_support)
1896 return false;
1897
1898 while (bus->parent) {
1899 struct pci_dev *bridge = bus->self;
1900
1901 if (device_run_wake(&bridge->dev))
1902 return true;
1903
1904 bus = bus->parent;
1905 }
1906
1907 /* We have reached the root bus. */
1908 if (bus->bridge)
1909 return device_run_wake(bus->bridge);
1910
1911 return false;
1912}
1913EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1914
eb9d0fe4
RW
1915/**
1916 * pci_pm_init - Initialize PM functions of given PCI device
1917 * @dev: PCI device to handle.
1918 */
1919void pci_pm_init(struct pci_dev *dev)
1920{
1921 int pm;
1922 u16 pmc;
1da177e4 1923
bb910a70 1924 pm_runtime_forbid(&dev->dev);
a1e4d72c 1925 device_enable_async_suspend(&dev->dev);
e80bb09d 1926 dev->wakeup_prepared = false;
bb910a70 1927
337001b6
RW
1928 dev->pm_cap = 0;
1929
eb9d0fe4
RW
1930 /* find PCI PM capability in list */
1931 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1932 if (!pm)
50246dd4 1933 return;
eb9d0fe4
RW
1934 /* Check device's ability to generate PME# */
1935 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 1936
eb9d0fe4
RW
1937 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1938 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1939 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 1940 return;
eb9d0fe4
RW
1941 }
1942
337001b6 1943 dev->pm_cap = pm;
1ae861e6 1944 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 1945 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
337001b6
RW
1946
1947 dev->d1_support = false;
1948 dev->d2_support = false;
1949 if (!pci_no_d1d2(dev)) {
c9ed77ee 1950 if (pmc & PCI_PM_CAP_D1)
337001b6 1951 dev->d1_support = true;
c9ed77ee 1952 if (pmc & PCI_PM_CAP_D2)
337001b6 1953 dev->d2_support = true;
c9ed77ee
BH
1954
1955 if (dev->d1_support || dev->d2_support)
1956 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
ec84f126
JB
1957 dev->d1_support ? " D1" : "",
1958 dev->d2_support ? " D2" : "");
337001b6
RW
1959 }
1960
1961 pmc &= PCI_PM_CAP_PME_MASK;
1962 if (pmc) {
10c3d71d
BH
1963 dev_printk(KERN_DEBUG, &dev->dev,
1964 "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
1965 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1966 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1967 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1968 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1969 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 1970 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 1971 dev->pme_poll = true;
eb9d0fe4
RW
1972 /*
1973 * Make device's PM flags reflect the wake-up capability, but
1974 * let user space enable it to wake up the system as needed.
1975 */
1976 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 1977 /* Disable the PME# generation functionality */
337001b6
RW
1978 pci_pme_active(dev, false);
1979 } else {
1980 dev->pme_support = 0;
eb9d0fe4 1981 }
1da177e4
LT
1982}
1983
eb9c39d0
JB
1984/**
1985 * platform_pci_wakeup_init - init platform wakeup if present
1986 * @dev: PCI device
1987 *
1988 * Some devices don't have PCI PM caps but can still generate wakeup
1989 * events through platform methods (like ACPI events). If @dev supports
1990 * platform wakeup events, set the device flag to indicate as much. This
1991 * may be redundant if the device also supports PCI PM caps, but double
1992 * initialization should be safe in that case.
1993 */
1994void platform_pci_wakeup_init(struct pci_dev *dev)
1995{
1996 if (!platform_pci_can_wakeup(dev))
1997 return;
1998
1999 device_set_wakeup_capable(&dev->dev, true);
eb9c39d0
JB
2000 platform_pci_sleep_wake(dev, false);
2001}
2002
34a4876e
YL
2003static void pci_add_saved_cap(struct pci_dev *pci_dev,
2004 struct pci_cap_saved_state *new_cap)
2005{
2006 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2007}
2008
63f4898a
RW
2009/**
2010 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
2011 * @dev: the PCI device
2012 * @cap: the capability to allocate the buffer for
2013 * @size: requested size of the buffer
2014 */
2015static int pci_add_cap_save_buffer(
2016 struct pci_dev *dev, char cap, unsigned int size)
2017{
2018 int pos;
2019 struct pci_cap_saved_state *save_state;
2020
2021 pos = pci_find_capability(dev, cap);
2022 if (pos <= 0)
2023 return 0;
2024
2025 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2026 if (!save_state)
2027 return -ENOMEM;
2028
24a4742f
AW
2029 save_state->cap.cap_nr = cap;
2030 save_state->cap.size = size;
63f4898a
RW
2031 pci_add_saved_cap(dev, save_state);
2032
2033 return 0;
2034}
2035
2036/**
2037 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2038 * @dev: the PCI device
2039 */
2040void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2041{
2042 int error;
2043
89858517
YZ
2044 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2045 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a
RW
2046 if (error)
2047 dev_err(&dev->dev,
2048 "unable to preallocate PCI Express save buffer\n");
2049
2050 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2051 if (error)
2052 dev_err(&dev->dev,
2053 "unable to preallocate PCI-X save buffer\n");
2054}
2055
f796841e
YL
2056void pci_free_cap_save_buffers(struct pci_dev *dev)
2057{
2058 struct pci_cap_saved_state *tmp;
2059 struct hlist_node *pos, *n;
2060
2061 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
2062 kfree(tmp);
2063}
2064
58c3a727
YZ
2065/**
2066 * pci_enable_ari - enable ARI forwarding if hardware supports it
2067 * @dev: the PCI device
2068 */
2069void pci_enable_ari(struct pci_dev *dev)
2070{
2071 int pos;
2072 u32 cap;
c463b8cb 2073 u16 ctrl;
8113587c 2074 struct pci_dev *bridge;
58c3a727 2075
6748dcc2 2076 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2077 return;
2078
8113587c
ZY
2079 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2080 if (!pos)
58c3a727
YZ
2081 return;
2082
8113587c 2083 bridge = dev->bus->self;
cb97ae34 2084 if (!bridge)
8113587c
ZY
2085 return;
2086
c463b8cb
MS
2087 /* ARI is a PCIe cap v2 feature */
2088 pos = pci_pcie_cap2(bridge);
58c3a727
YZ
2089 if (!pos)
2090 return;
2091
8113587c 2092 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2093 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2094 return;
2095
8113587c 2096 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
58c3a727 2097 ctrl |= PCI_EXP_DEVCTL2_ARI;
8113587c 2098 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
58c3a727 2099
8113587c 2100 bridge->ari_enabled = 1;
58c3a727
YZ
2101}
2102
b48d4425 2103/**
c463b8cb 2104 * pci_enable_ido - enable ID-based Ordering on a device
b48d4425
JB
2105 * @dev: the PCI device
2106 * @type: which types of IDO to enable
2107 *
2108 * Enable ID-based ordering on @dev. @type can contain the bits
2109 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2110 * which types of transactions are allowed to be re-ordered.
2111 */
2112void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2113{
2114 int pos;
2115 u16 ctrl;
2116
c463b8cb
MS
2117 /* ID-based Ordering is a PCIe cap v2 feature */
2118 pos = pci_pcie_cap2(dev);
b48d4425
JB
2119 if (!pos)
2120 return;
2121
2122 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2123 if (type & PCI_EXP_IDO_REQUEST)
2124 ctrl |= PCI_EXP_IDO_REQ_EN;
2125 if (type & PCI_EXP_IDO_COMPLETION)
2126 ctrl |= PCI_EXP_IDO_CMP_EN;
2127 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2128}
2129EXPORT_SYMBOL(pci_enable_ido);
2130
2131/**
2132 * pci_disable_ido - disable ID-based ordering on a device
2133 * @dev: the PCI device
2134 * @type: which types of IDO to disable
2135 */
2136void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2137{
2138 int pos;
2139 u16 ctrl;
2140
c463b8cb
MS
2141 /* ID-based Ordering is a PCIe cap v2 feature */
2142 pos = pci_pcie_cap2(dev);
b48d4425
JB
2143 if (!pos)
2144 return;
2145
2146 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2147 if (type & PCI_EXP_IDO_REQUEST)
2148 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2149 if (type & PCI_EXP_IDO_COMPLETION)
2150 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2151 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2152}
2153EXPORT_SYMBOL(pci_disable_ido);
2154
48a92a81
JB
2155/**
2156 * pci_enable_obff - enable optimized buffer flush/fill
2157 * @dev: PCI device
2158 * @type: type of signaling to use
2159 *
2160 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2161 * signaling if possible, falling back to message signaling only if
2162 * WAKE# isn't supported. @type should indicate whether the PCIe link
2163 * should be brought out of L0s or L1 to send the message. It should be
2164 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2165 *
2166 * If your device can benefit from receiving all messages, even at the
2167 * power cost of bringing the link back up from a low power state, use
2168 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2169 * preferred type).
2170 *
2171 * RETURNS:
2172 * Zero on success, appropriate error number on failure.
2173 */
2174int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2175{
2176 int pos;
2177 u32 cap;
2178 u16 ctrl;
2179 int ret;
2180
c463b8cb
MS
2181 /* OBFF is a PCIe cap v2 feature */
2182 pos = pci_pcie_cap2(dev);
48a92a81
JB
2183 if (!pos)
2184 return -ENOTSUPP;
2185
2186 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2187 if (!(cap & PCI_EXP_OBFF_MASK))
2188 return -ENOTSUPP; /* no OBFF support at all */
2189
2190 /* Make sure the topology supports OBFF as well */
8291550f 2191 if (dev->bus->self) {
48a92a81
JB
2192 ret = pci_enable_obff(dev->bus->self, type);
2193 if (ret)
2194 return ret;
2195 }
2196
2197 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2198 if (cap & PCI_EXP_OBFF_WAKE)
2199 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2200 else {
2201 switch (type) {
2202 case PCI_EXP_OBFF_SIGNAL_L0:
2203 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2204 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2205 break;
2206 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2207 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2208 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2209 break;
2210 default:
2211 WARN(1, "bad OBFF signal type\n");
2212 return -ENOTSUPP;
2213 }
2214 }
2215 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2216
2217 return 0;
2218}
2219EXPORT_SYMBOL(pci_enable_obff);
2220
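/*
 * Illustrative sketch (not part of the original file): OBFF is optional, so
 * a caller would typically treat -ENOTSUPP as non-fatal; the "foo_" name is
 * hypothetical.
 */
static void foo_try_obff(struct pci_dev *pdev)
{
	int error = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (error && error != -ENOTSUPP)
		dev_warn(&pdev->dev, "OBFF setup failed: %d\n", error);
}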
2221/**
2222 * pci_disable_obff - disable optimized buffer flush/fill
2223 * @dev: PCI device
2224 *
2225 * Disable OBFF on @dev.
2226 */
2227void pci_disable_obff(struct pci_dev *dev)
2228{
2229 int pos;
2230 u16 ctrl;
2231
c463b8cb
MS
2232 /* OBFF is a PCIe cap v2 feature */
2233 pos = pci_pcie_cap2(dev);
48a92a81
JB
2234 if (!pos)
2235 return;
2236
2237 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2238 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2239 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2240}
2241EXPORT_SYMBOL(pci_disable_obff);
2242
51c2e0a7
JB
2243/**
2244 * pci_ltr_supported - check whether a device supports LTR
2245 * @dev: PCI device
2246 *
2247 * RETURNS:
2248 * True if @dev supports latency tolerance reporting, false otherwise.
2249 */
c32823f8 2250static bool pci_ltr_supported(struct pci_dev *dev)
51c2e0a7
JB
2251{
2252 int pos;
2253 u32 cap;
2254
c463b8cb
MS
2255 /* LTR is a PCIe cap v2 feature */
2256 pos = pci_pcie_cap2(dev);
51c2e0a7
JB
2257 if (!pos)
2258 return false;
2259
2260 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2261
2262 return cap & PCI_EXP_DEVCAP2_LTR;
2263}
51c2e0a7
JB
2264
2265/**
2266 * pci_enable_ltr - enable latency tolerance reporting
2267 * @dev: PCI device
2268 *
2269 * Enable LTR on @dev if possible, which means enabling it first on
2270 * upstream ports.
2271 *
2272 * RETURNS:
2273 * Zero on success, errno on failure.
2274 */
2275int pci_enable_ltr(struct pci_dev *dev)
2276{
2277 int pos;
2278 u16 ctrl;
2279 int ret;
2280
2281 if (!pci_ltr_supported(dev))
2282 return -ENOTSUPP;
2283
c463b8cb
MS
2284 /* LTR is a PCIe cap v2 feature */
2285 pos = pci_pcie_cap2(dev);
51c2e0a7
JB
2286 if (!pos)
2287 return -ENOTSUPP;
2288
2289 /* Only primary function can enable/disable LTR */
2290 if (PCI_FUNC(dev->devfn) != 0)
2291 return -EINVAL;
2292
2293 /* Enable upstream ports first */
8291550f 2294 if (dev->bus->self) {
51c2e0a7
JB
2295 ret = pci_enable_ltr(dev->bus->self);
2296 if (ret)
2297 return ret;
2298 }
2299
2300 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2301 ctrl |= PCI_EXP_LTR_EN;
2302 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2303
2304 return 0;
2305}
2306EXPORT_SYMBOL(pci_enable_ltr);
2307
2308/**
2309 * pci_disable_ltr - disable latency tolerance reporting
2310 * @dev: PCI device
2311 */
2312void pci_disable_ltr(struct pci_dev *dev)
2313{
2314 int pos;
2315 u16 ctrl;
2316
2317 if (!pci_ltr_supported(dev))
2318 return;
2319
c463b8cb
MS
2320 /* LTR is a PCIe cap v2 feature */
2321 pos = pci_pcie_cap2(dev);
51c2e0a7
JB
2322 if (!pos)
2323 return;
2324
2325 /* Only primary function can enable/disable LTR */
2326 if (PCI_FUNC(dev->devfn) != 0)
2327 return;
2328
2329 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2330 ctrl &= ~PCI_EXP_LTR_EN;
2331 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2332}
2333EXPORT_SYMBOL(pci_disable_ltr);
2334
2335static int __pci_ltr_scale(int *val)
2336{
2337 int scale = 0;
2338
2339 while (*val > 1023) {
2340 *val = (*val + 31) / 32;
2341 scale++;
2342 }
2343 return scale;
2344}
2345
2346/**
2347 * pci_set_ltr - set LTR latency values
2348 * @dev: PCI device
2349 * @snoop_lat_ns: snoop latency in nanoseconds
2350 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2351 *
2352 * Figure out the scale and set the LTR values accordingly.
2353 */
2354int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2355{
2356 int pos, ret, snoop_scale, nosnoop_scale;
2357 u16 val;
2358
2359 if (!pci_ltr_supported(dev))
2360 return -ENOTSUPP;
2361
2362 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2363 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2364
2365 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2366 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2367 return -EINVAL;
2368
2369 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2370 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2371 return -EINVAL;
2372
2373 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2374 if (!pos)
2375 return -ENOTSUPP;
2376
2377 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2378 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2379 if (ret != 4)
2380 return -EIO;
2381
2382 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2383 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2384 if (ret != 4)
2385 return -EIO;
2386
2387 return 0;
2388}
2389EXPORT_SYMBOL(pci_set_ltr);
2390
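/*
 * Worked example (added for illustration, not part of the original file):
 * the LTR registers encode a latency as value * 32^scale ns with
 * value <= 1023. For a requested 70000 ns: (70000 + 31) / 32 = 2188,
 * then (2188 + 31) / 32 = 69, so __pci_ltr_scale() yields scale 2 and the
 * register holds 69, i.e. 69 * 1024 = 70656 ns >= 70000 ns. A driver
 * tolerating ~70 us of snoop and no-snoop latency might, hypothetically, do:
 */
static int foo_configure_ltr(struct pci_dev *pdev)
{
	int error = pci_enable_ltr(pdev);

	if (error)
		return error;
	return pci_set_ltr(pdev, 70000, 70000);
}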
5d990b62
CW
2391static int pci_acs_enable;
2392
2393/**
2394 * pci_request_acs - ask for ACS to be enabled if supported
2395 */
2396void pci_request_acs(void)
2397{
2398 pci_acs_enable = 1;
2399}
2400
ae21ee65
AK
2401/**
2402 * pci_enable_acs - enable ACS if hardware supports it
2403 * @dev: the PCI device
2404 */
2405void pci_enable_acs(struct pci_dev *dev)
2406{
2407 int pos;
2408 u16 cap;
2409 u16 ctrl;
2410
5d990b62
CW
2411 if (!pci_acs_enable)
2412 return;
2413
5f4d91a1 2414 if (!pci_is_pcie(dev))
ae21ee65
AK
2415 return;
2416
2417 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2418 if (!pos)
2419 return;
2420
2421 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2422 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2423
2424 /* Source Validation */
2425 ctrl |= (cap & PCI_ACS_SV);
2426
2427 /* P2P Request Redirect */
2428 ctrl |= (cap & PCI_ACS_RR);
2429
2430 /* P2P Completion Redirect */
2431 ctrl |= (cap & PCI_ACS_CR);
2432
2433 /* Upstream Forwarding */
2434 ctrl |= (cap & PCI_ACS_UF);
2435
2436 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2437}
2438
ad805758
AW
2439/**
2440 * pci_acs_enabled - test ACS against required flags for a given device
2441 * @pdev: device to test
2442 * @acs_flags: required PCI ACS flags
2443 *
2444 * Return true if the device supports the provided flags. Automatically
2445 * filters out flags that are not implemented on multifunction devices.
2446 */
2447bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2448{
2449 int pos, ret;
2450 u16 ctrl;
2451
2452 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2453 if (ret >= 0)
2454 return ret > 0;
2455
2456 if (!pci_is_pcie(pdev))
2457 return false;
2458
2459 /* Filter out flags not applicable to multifunction */
2460 if (pdev->multifunction)
2461 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2462 PCI_ACS_EC | PCI_ACS_DT);
2463
62f87c0e
YW
2464 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
2465 pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
ad805758
AW
2466 pdev->multifunction) {
2467 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2468 if (!pos)
2469 return false;
2470
2471 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2472 if ((ctrl & acs_flags) != acs_flags)
2473 return false;
2474 }
2475
2476 return true;
2477}
2478
2479/**
2480 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2481 * @start: starting downstream device
2482 * @end: ending upstream device or NULL to search to the root bus
2483 * @acs_flags: required flags
2484 *
2485 * Walk up a device tree from start to end testing PCI ACS support. If
2486 * any step along the way does not support the required flags, return false.
2487 */
2488bool pci_acs_path_enabled(struct pci_dev *start,
2489 struct pci_dev *end, u16 acs_flags)
2490{
2491 struct pci_dev *pdev, *parent = start;
2492
2493 do {
2494 pdev = parent;
2495
2496 if (!pci_acs_enabled(pdev, acs_flags))
2497 return false;
2498
2499 if (pci_is_root_bus(pdev->bus))
2500 return (end == NULL);
2501
2502 parent = pdev->bus->self;
2503 } while (pdev != end);
2504
2505 return true;
2506}
2507
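/*
 * Illustrative sketch (not part of the original file): an isolation check,
 * e.g. for IOMMU grouping, can require the ACS controls that pci_enable_acs()
 * sets along the whole path to the root with a single call; "foo_" is a
 * hypothetical name.
 */
static bool foo_dev_is_isolated(struct pci_dev *pdev)
{
	return pci_acs_path_enabled(pdev, NULL,
				    PCI_ACS_SV | PCI_ACS_RR |
				    PCI_ACS_CR | PCI_ACS_UF);
}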
57c2cf71
BH
2508/**
2509 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2510 * @dev: the PCI device
2511 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2512 *
2513 * Perform INTx swizzling for a device behind one level of bridge. This is
2514 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
2515 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2516 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2517 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 2518 */
3df425f3 2519u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 2520{
46b952a3
MW
2521 int slot;
2522
2523 if (pci_ari_enabled(dev->bus))
2524 slot = 0;
2525 else
2526 slot = PCI_SLOT(dev->devfn);
2527
2528 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
2529}
2530
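/*
 * Worked example (added for illustration, not part of the original file):
 * a device in slot 2 raising INTB (pin 2) appears one bridge up as
 * (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD. With ARI enabled the slot is
 * always 0, so the pin would be unchanged across that bridge.
 */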
1da177e4
LT
2531int
2532pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2533{
2534 u8 pin;
2535
514d207d 2536 pin = dev->pin;
1da177e4
LT
2537 if (!pin)
2538 return -1;
878f2e50 2539
8784fd4d 2540 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 2541 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
2542 dev = dev->bus->self;
2543 }
2544 *bridge = dev;
2545 return pin;
2546}
2547
68feac87
BH
2548/**
2549 * pci_common_swizzle - swizzle INTx all the way to root bridge
2550 * @dev: the PCI device
2551 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2552 *
2553 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2554 * bridges all the way up to a PCI root bus.
2555 */
2556u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2557{
2558 u8 pin = *pinp;
2559
1eb39487 2560 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
2561 pin = pci_swizzle_interrupt_pin(dev, pin);
2562 dev = dev->bus->self;
2563 }
2564 *pinp = pin;
2565 return PCI_SLOT(dev->devfn);
2566}
2567
1da177e4
LT
2568/**
2569 * pci_release_region - Release a PCI bar
2570 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2571 * @bar: BAR to release
2572 *
2573 * Releases the PCI I/O and memory resources previously reserved by a
2574 * successful call to pci_request_region. Call this function only
2575 * after all use of the PCI regions has ceased.
2576 */
2577void pci_release_region(struct pci_dev *pdev, int bar)
2578{
9ac7849e
TH
2579 struct pci_devres *dr;
2580
1da177e4
LT
2581 if (pci_resource_len(pdev, bar) == 0)
2582 return;
2583 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2584 release_region(pci_resource_start(pdev, bar),
2585 pci_resource_len(pdev, bar));
2586 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2587 release_mem_region(pci_resource_start(pdev, bar),
2588 pci_resource_len(pdev, bar));
9ac7849e
TH
2589
2590 dr = find_pci_dr(pdev);
2591 if (dr)
2592 dr->region_mask &= ~(1 << bar);
1da177e4
LT
2593}
2594
2595/**
f5ddcac4 2596 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
2597 * @pdev: PCI device whose resources are to be reserved
2598 * @bar: BAR to be reserved
2599 * @res_name: Name to be associated with resource.
f5ddcac4 2600 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
2601 *
2602 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2603 * being reserved by owner @res_name. Do not access any
2604 * address inside the PCI regions unless this call returns
2605 * successfully.
2606 *
f5ddcac4
RD
2607 * If @exclusive is set, then the region is marked so that userspace
2608 * is explicitly not allowed to map the resource via /dev/mem or
2609 * sysfs MMIO access.
2610 *
1da177e4
LT
2611 * Returns 0 on success, or %EBUSY on error. A warning
2612 * message is also printed on failure.
2613 */
e8de1481
AV
2614static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2615 int exclusive)
1da177e4 2616{
9ac7849e
TH
2617 struct pci_devres *dr;
2618
1da177e4
LT
2619 if (pci_resource_len(pdev, bar) == 0)
2620 return 0;
2621
2622 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2623 if (!request_region(pci_resource_start(pdev, bar),
2624 pci_resource_len(pdev, bar), res_name))
2625 goto err_out;
2626 }
2627 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
2628 if (!__request_mem_region(pci_resource_start(pdev, bar),
2629 pci_resource_len(pdev, bar), res_name,
2630 exclusive))
1da177e4
LT
2631 goto err_out;
2632 }
9ac7849e
TH
2633
2634 dr = find_pci_dr(pdev);
2635 if (dr)
2636 dr->region_mask |= 1 << bar;
2637
1da177e4
LT
2638 return 0;
2639
2640err_out:
c7dabef8 2641 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 2642 &pdev->resource[bar]);
1da177e4
LT
2643 return -EBUSY;
2644}
2645
e8de1481 2646/**
f5ddcac4 2647 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
2648 * @pdev: PCI device whose resources are to be reserved
2649 * @bar: BAR to be reserved
f5ddcac4 2650 * @res_name: Name to be associated with resource
e8de1481 2651 *
f5ddcac4 2652 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
2653 * being reserved by owner @res_name. Do not access any
2654 * address inside the PCI regions unless this call returns
2655 * successfully.
2656 *
2657 * Returns 0 on success, or %EBUSY on error. A warning
2658 * message is also printed on failure.
2659 */
2660int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2661{
2662 return __pci_request_region(pdev, bar, res_name, 0);
2663}
2664
2665/**
2666 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2667 * @pdev: PCI device whose resources are to be reserved
2668 * @bar: BAR to be reserved
2669 * @res_name: Name to be associated with resource.
2670 *
2671 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2672 * being reserved by owner @res_name. Do not access any
2673 * address inside the PCI regions unless this call returns
2674 * successfully.
2675 *
2676 * Returns 0 on success, or %EBUSY on error. A warning
2677 * message is also printed on failure.
2678 *
2679 * The key difference that _exclusive makes is that userspace is
2680 * explicitly not allowed to map the resource via /dev/mem or
2681 * sysfs.
2682 */
2683int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2684{
2685 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2686}
c87deff7
HS
2687/**
2688 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2689 * @pdev: PCI device whose resources were previously reserved
2690 * @bars: Bitmask of BARs to be released
2691 *
2692 * Release selected PCI I/O and memory resources previously reserved.
2693 * Call this function only after all use of the PCI regions has ceased.
2694 */
2695void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2696{
2697 int i;
2698
2699 for (i = 0; i < 6; i++)
2700 if (bars & (1 << i))
2701 pci_release_region(pdev, i);
2702}
2703
e8de1481
AV
2704int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2705 const char *res_name, int excl)
c87deff7
HS
2706{
2707 int i;
2708
2709 for (i = 0; i < 6; i++)
2710 if (bars & (1 << i))
e8de1481 2711 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
2712 goto err_out;
2713 return 0;
2714
2715err_out:
2716 while (--i >= 0)
2717 if (bars & (1 << i))
2718 pci_release_region(pdev, i);
2719
2720 return -EBUSY;
2721}
1da177e4 2722
e8de1481
AV
2723
2724/**
2725 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2726 * @pdev: PCI device whose resources are to be reserved
2727 * @bars: Bitmask of BARs to be requested
2728 * @res_name: Name to be associated with resource
2729 */
2730int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2731 const char *res_name)
2732{
2733 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2734}
2735
2736int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2737 int bars, const char *res_name)
2738{
2739 return __pci_request_selected_regions(pdev, bars, res_name,
2740 IORESOURCE_EXCLUSIVE);
2741}
2742
1da177e4
LT
2743/**
2744 * pci_release_regions - Release reserved PCI I/O and memory resources
2745 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2746 *
2747 * Releases all PCI I/O and memory resources previously reserved by a
2748 * successful call to pci_request_regions. Call this function only
2749 * after all use of the PCI regions has ceased.
2750 */
2751
2752void pci_release_regions(struct pci_dev *pdev)
2753{
c87deff7 2754 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4
LT
2755}
2756
2757/**
2758 * pci_request_regions - Reserve PCI I/O and memory resources
2759 * @pdev: PCI device whose resources are to be reserved
2760 * @res_name: Name to be associated with resource.
2761 *
2762 * Mark all PCI regions associated with PCI device @pdev as
2763 * being reserved by owner @res_name. Do not access any
2764 * address inside the PCI regions unless this call returns
2765 * successfully.
2766 *
2767 * Returns 0 on success, or %EBUSY on error. A warning
2768 * message is also printed on failure.
2769 */
3c990e92 2770int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 2771{
c87deff7 2772 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4
LT
2773}
2774
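/*
 * Illustrative sketch (not part of the original file): the canonical
 * probe-time pairing of pci_enable_device() and pci_request_regions();
 * the "foo" driver name is hypothetical.
 */
static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	int error = pci_enable_device(pdev);

	if (error)
		return error;

	error = pci_request_regions(pdev, "foo");
	if (error) {
		pci_disable_device(pdev);
		return error;
	}

	pci_set_master(pdev);
	return 0;
}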
e8de1481
AV
2775/**
2776 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2777 * @pdev: PCI device whose resources are to be reserved
2778 * @res_name: Name to be associated with resource.
2779 *
2780 * Mark all PCI regions associated with PCI device @pdev as
2781 * being reserved by owner @res_name. Do not access any
2782 * address inside the PCI regions unless this call returns
2783 * successfully.
2784 *
2785 * pci_request_regions_exclusive() will mark the region so that
2786 * /dev/mem and the sysfs MMIO access will not be allowed.
2787 *
2788 * Returns 0 on success, or %EBUSY on error. A warning
2789 * message is also printed on failure.
2790 */
2791int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2792{
2793 return pci_request_selected_regions_exclusive(pdev,
2794 ((1 << 6) - 1), res_name);
2795}
2796
6a479079
BH
2797static void __pci_set_master(struct pci_dev *dev, bool enable)
2798{
2799 u16 old_cmd, cmd;
2800
2801 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2802 if (enable)
2803 cmd = old_cmd | PCI_COMMAND_MASTER;
2804 else
2805 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2806 if (cmd != old_cmd) {
2807 dev_dbg(&dev->dev, "%s bus mastering\n",
2808 enable ? "enabling" : "disabling");
2809 pci_write_config_word(dev, PCI_COMMAND, cmd);
2810 }
2811 dev->is_busmaster = enable;
2812}
e8de1481 2813
2b6f2c35
MS
2814/**
2815 * pcibios_setup - process "pci=" kernel boot arguments
2816 * @str: string used to pass in "pci=" kernel boot arguments
2817 *
2818 * Process kernel boot arguments. This is the default implementation.
2819 * Architecture specific implementations can override this as necessary.
2820 */
2821char * __weak __init pcibios_setup(char *str)
2822{
2823 return str;
2824}
2825
96c55900
MS
2826/**
2827 * pcibios_set_master - enable PCI bus-mastering for device dev
2828 * @dev: the PCI device to enable
2829 *
2830 * Enables PCI bus-mastering for the device. This is the default
2831 * implementation. Architecture specific implementations can override
2832 * this if necessary.
2833 */
2834void __weak pcibios_set_master(struct pci_dev *dev)
2835{
2836 u8 lat;
2837
f676678f
MS
2838 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2839 if (pci_is_pcie(dev))
2840 return;
2841
96c55900
MS
2842 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2843 if (lat < 16)
2844 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2845 else if (lat > pcibios_max_latency)
2846 lat = pcibios_max_latency;
2847 else
2848 return;
2849 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2850 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2851}
2852
1da177e4
LT
2853/**
2854 * pci_set_master - enables bus-mastering for device dev
2855 * @dev: the PCI device to enable
2856 *
2857 * Enables bus-mastering on the device and calls pcibios_set_master()
2858 * to do the needed arch specific settings.
2859 */
6a479079 2860void pci_set_master(struct pci_dev *dev)
1da177e4 2861{
6a479079 2862 __pci_set_master(dev, true);
1da177e4
LT
2863 pcibios_set_master(dev);
2864}
2865
6a479079
BH
2866/**
2867 * pci_clear_master - disables bus-mastering for device dev
2868 * @dev: the PCI device to disable
2869 */
2870void pci_clear_master(struct pci_dev *dev)
2871{
2872 __pci_set_master(dev, false);
2873}
2874
1da177e4 2875/**
edb2d97e
MW
2876 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2877 * @dev: the PCI device for which MWI is to be enabled
1da177e4 2878 *
edb2d97e
MW
2879 * Helper function for pci_set_mwi.
2880 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
2881 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2882 *
2883 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2884 */
15ea76d4 2885int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
2886{
2887 u8 cacheline_size;
2888
2889 if (!pci_cache_line_size)
15ea76d4 2890 return -EINVAL;
1da177e4
LT
2891
2892 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2893 equal to or a multiple of the right value. */
2894 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2895 if (cacheline_size >= pci_cache_line_size &&
2896 (cacheline_size % pci_cache_line_size) == 0)
2897 return 0;
2898
2899 /* Write the correct value. */
2900 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2901 /* Read it back. */
2902 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2903 if (cacheline_size == pci_cache_line_size)
2904 return 0;
2905
80ccba11
BH
2906 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2907 "supported\n", pci_cache_line_size << 2);
1da177e4
LT
2908
2909 return -EINVAL;
2910}
15ea76d4
TH
2911EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2912
2913#ifdef PCI_DISABLE_MWI
2914int pci_set_mwi(struct pci_dev *dev)
2915{
2916 return 0;
2917}
2918
2919int pci_try_set_mwi(struct pci_dev *dev)
2920{
2921 return 0;
2922}
2923
2924void pci_clear_mwi(struct pci_dev *dev)
2925{
2926}
2927
2928#else
1da177e4
LT
2929
2930/**
2931 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2932 * @dev: the PCI device for which MWI is enabled
2933 *
694625c0 2934 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
2935 *
2936 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2937 */
2938int
2939pci_set_mwi(struct pci_dev *dev)
2940{
2941 int rc;
2942 u16 cmd;
2943
edb2d97e 2944 rc = pci_set_cacheline_size(dev);
1da177e4
LT
2945 if (rc)
2946 return rc;
2947
2948 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2949 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
80ccba11 2950 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
2951 cmd |= PCI_COMMAND_INVALIDATE;
2952 pci_write_config_word(dev, PCI_COMMAND, cmd);
2953 }
2954
2955 return 0;
2956}
2957
694625c0
RD
2958/**
2959 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2960 * @dev: the PCI device for which MWI is enabled
2961 *
2962 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2963 * Callers are not required to check the return value.
2964 *
2965 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2966 */
2967int pci_try_set_mwi(struct pci_dev *dev)
2968{
2969 int rc = pci_set_mwi(dev);
2970 return rc;
2971}
2972
1da177e4
LT
2973/**
2974 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2975 * @dev: the PCI device to disable
2976 *
2977 * Disables PCI Memory-Write-Invalidate transaction on the device
2978 */
2979void
2980pci_clear_mwi(struct pci_dev *dev)
2981{
2982 u16 cmd;
2983
2984 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2985 if (cmd & PCI_COMMAND_INVALIDATE) {
2986 cmd &= ~PCI_COMMAND_INVALIDATE;
2987 pci_write_config_word(dev, PCI_COMMAND, cmd);
2988 }
2989}
edb2d97e 2990#endif /* ! PCI_DISABLE_MWI */
1da177e4 2991
a04ce0ff
BR
2992/**
2993 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
2994 * @pdev: the PCI device to operate on
2995 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
2996 *
2997 * Enables/disables PCI INTx for device dev
2998 */
2999void
3000pci_intx(struct pci_dev *pdev, int enable)
3001{
3002 u16 pci_command, new;
3003
3004 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3005
3006 if (enable) {
3007 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3008 } else {
3009 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3010 }
3011
3012 if (new != pci_command) {
9ac7849e
TH
3013 struct pci_devres *dr;
3014
2fd9d74b 3015 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
3016
3017 dr = find_pci_dr(pdev);
3018 if (dr && !dr->restore_intx) {
3019 dr->restore_intx = 1;
3020 dr->orig_intx = !enable;
3021 }
a04ce0ff
BR
3022 }
3023}
3024
a2e27787
JK
3025/**
3026 * pci_intx_mask_supported - probe for INTx masking support
6e9292c5 3027 * @dev: the PCI device to operate on
a2e27787
JK
3028 *
3029 * Check if the device dev supports INTx masking via the config space
3030 * command word.
3031 */
3032bool pci_intx_mask_supported(struct pci_dev *dev)
3033{
3034 bool mask_supported = false;
3035 u16 orig, new;
3036
fbebb9fd
BH
3037 if (dev->broken_intx_masking)
3038 return false;
3039
a2e27787
JK
3040 pci_cfg_access_lock(dev);
3041
3042 pci_read_config_word(dev, PCI_COMMAND, &orig);
3043 pci_write_config_word(dev, PCI_COMMAND,
3044 orig ^ PCI_COMMAND_INTX_DISABLE);
3045 pci_read_config_word(dev, PCI_COMMAND, &new);
3046
3047 /*
3048 * There's no way to protect against hardware bugs or detect them
3049 * reliably, but as long as we know what the value should be, let's
3050 * go ahead and check it.
3051 */
3052 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3053 dev_err(&dev->dev, "Command register changed from "
3054 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
3055 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3056 mask_supported = true;
3057 pci_write_config_word(dev, PCI_COMMAND, orig);
3058 }
3059
3060 pci_cfg_access_unlock(dev);
3061 return mask_supported;
3062}
3063EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3064
3065static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3066{
3067 struct pci_bus *bus = dev->bus;
3068 bool mask_updated = true;
3069 u32 cmd_status_dword;
3070 u16 origcmd, newcmd;
3071 unsigned long flags;
3072 bool irq_pending;
3073
3074 /*
3075 * We do a single dword read to retrieve both command and status.
3076 * Document assumptions that make this possible.
3077 */
3078 BUILD_BUG_ON(PCI_COMMAND % 4);
3079 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3080
3081 raw_spin_lock_irqsave(&pci_lock, flags);
3082
3083 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3084
3085 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3086
3087 /*
3088 * Check interrupt status register to see whether our device
3089 * triggered the interrupt (when masking) or the next IRQ is
3090 * already pending (when unmasking).
3091 */
3092 if (mask != irq_pending) {
3093 mask_updated = false;
3094 goto done;
3095 }
3096
3097 origcmd = cmd_status_dword;
3098 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3099 if (mask)
3100 newcmd |= PCI_COMMAND_INTX_DISABLE;
3101 if (newcmd != origcmd)
3102 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3103
3104done:
3105 raw_spin_unlock_irqrestore(&pci_lock, flags);
3106
3107 return mask_updated;
3108}
3109
3110/**
3111 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 3112 * @dev: the PCI device to operate on
a2e27787
JK
3113 *
3114 * Check if the device dev has its INTx line asserted, mask it and
3115 * return true in that case. False is returned if no interrupt was
3116 * pending.
3117 */
3118bool pci_check_and_mask_intx(struct pci_dev *dev)
3119{
3120 return pci_check_and_set_intx_mask(dev, true);
3121}
3122EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3123
3124/**
3125 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3126 * @dev: the PCI device to operate on
a2e27787
JK
3127 *
3128 * Check if the device dev has its INTx line asserted, unmask it if not
3129 * and return true. False is returned and the mask remains active if
3130 * there was still an interrupt pending.
3131 */
3132bool pci_check_and_unmask_intx(struct pci_dev *dev)
3133{
3134 return pci_check_and_set_intx_mask(dev, false);
3135}
3136EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3137
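/*
 * Illustrative sketch (not part of the original file): a hard IRQ handler
 * can use pci_check_and_mask_intx() to claim and silence a shared INTx
 * line, deferring the real work to a thread that re-enables the interrupt
 * via pci_check_and_unmask_intx(). The "foo_" names are hypothetical and
 * assume registration with request_threaded_irq().
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* the interrupt was not ours */
	return IRQ_WAKE_THREAD;		/* handled in foo_irq_thread() */
}

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* ... process the device event ... */
	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}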
f5f2b131
EB
3138/**
3139 * pci_msi_off - disables any MSI or MSI-X capabilities
8d7d86e9 3140 * @dev: the PCI device to operate on
f5f2b131
EB
3141 *
3142 * If you want to use MSI, see pci_enable_msi() and friends.
3143 * This is a lower-level primitive that allows us to disable
3144 * MSI operation at the device level.
3145 */
3146void pci_msi_off(struct pci_dev *dev)
3147{
3148 int pos;
3149 u16 control;
3150
3151 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3152 if (pos) {
3153 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3154 control &= ~PCI_MSI_FLAGS_ENABLE;
3155 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3156 }
3157 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3158 if (pos) {
3159 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3160 control &= ~PCI_MSIX_FLAGS_ENABLE;
3161 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3162 }
3163}
b03214d5 3164EXPORT_SYMBOL_GPL(pci_msi_off);
f5f2b131 3165
4d57cdfa
FT
3166int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3167{
3168 return dma_set_max_seg_size(&dev->dev, size);
3169}
3170EXPORT_SYMBOL(pci_set_dma_max_seg_size);
4d57cdfa 3171
59fc67de
FT
3172int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3173{
3174 return dma_set_seg_boundary(&dev->dev, mask);
3175}
3176EXPORT_SYMBOL(pci_set_dma_seg_boundary);
59fc67de 3177
8c1c699f 3178static int pcie_flr(struct pci_dev *dev, int probe)
8dd7f803 3179{
8c1c699f
YZ
3180 int i;
3181 int pos;
8dd7f803 3182 u32 cap;
04b55c47 3183 u16 status, control;
8dd7f803 3184
06a1cbaf 3185 pos = pci_pcie_cap(dev);
8c1c699f 3186 if (!pos)
8dd7f803 3187 return -ENOTTY;
8c1c699f
YZ
3188
3189 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
8dd7f803
SY
3190 if (!(cap & PCI_EXP_DEVCAP_FLR))
3191 return -ENOTTY;
3192
d91cdc74
SY
3193 if (probe)
3194 return 0;
3195
8dd7f803 3196 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3197 for (i = 0; i < 4; i++) {
3198 if (i)
3199 msleep((1 << (i - 1)) * 100);
5fe5db05 3200
8c1c699f
YZ
3201 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3202 if (!(status & PCI_EXP_DEVSTA_TRPND))
3203 goto clear;
3204 }
3205
3206 dev_err(&dev->dev, "transaction is not cleared; "
3207 "proceeding with reset anyway\n");
3208
3209clear:
04b55c47
SR
3210 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3211 control |= PCI_EXP_DEVCTL_BCR_FLR;
3212 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3213
8c1c699f 3214 msleep(100);
8dd7f803 3215
8dd7f803
SY
3216 return 0;
3217}
d91cdc74 3218
8c1c699f 3219static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 3220{
8c1c699f
YZ
3221 int i;
3222 int pos;
1ca88797 3223 u8 cap;
8c1c699f 3224 u8 status;
1ca88797 3225
8c1c699f
YZ
3226 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3227 if (!pos)
1ca88797 3228 return -ENOTTY;
8c1c699f
YZ
3229
3230 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
3231 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3232 return -ENOTTY;
3233
3234 if (probe)
3235 return 0;
3236
1ca88797 3237 /* Wait for Transaction Pending bit clean */
8c1c699f
YZ
3238 for (i = 0; i < 4; i++) {
3239 if (i)
3240 msleep((1 << (i - 1)) * 100);
3241
3242 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3243 if (!(status & PCI_AF_STATUS_TP))
3244 goto clear;
3245 }
5fe5db05 3246
8c1c699f
YZ
3247 dev_err(&dev->dev, "transaction is not cleared; "
3248 "proceeding with reset anyway\n");
5fe5db05 3249
8c1c699f
YZ
3250clear:
3251 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
1ca88797 3252 msleep(100);
8c1c699f 3253
1ca88797
SY
3254 return 0;
3255}
3256
83d74e03
RW
3257/**
3258 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3259 * @dev: Device to reset.
3260 * @probe: If set, only check if the device can be reset this way.
3261 *
3262 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3263 * unset, it will be reinitialized internally when going from PCI_D3hot to
3264 * PCI_D0. If that's the case and the device is not in a low-power state
3265 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3266 *
3267 * NOTE: This causes the caller to sleep for twice the device power transition
3268 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3269 * by default (i.e. unless the @dev's d3_delay field has a different value).
3270 * Moreover, only devices in D0 can be reset by this function.
3271 */
f85876ba 3272static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 3273{
f85876ba
YZ
3274 u16 csr;
3275
3276 if (!dev->pm_cap)
3277 return -ENOTTY;
d91cdc74 3278
f85876ba
YZ
3279 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3280 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3281 return -ENOTTY;
d91cdc74 3282
f85876ba
YZ
3283 if (probe)
3284 return 0;
1ca88797 3285
f85876ba
YZ
3286 if (dev->current_state != PCI_D0)
3287 return -EINVAL;
3288
3289 csr &= ~PCI_PM_CTRL_STATE_MASK;
3290 csr |= PCI_D3hot;
3291 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3292 pci_dev_d3_sleep(dev);
f85876ba
YZ
3293
3294 csr &= ~PCI_PM_CTRL_STATE_MASK;
3295 csr |= PCI_D0;
3296 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 3297 pci_dev_d3_sleep(dev);
f85876ba
YZ
3298
3299 return 0;
3300}
3301
c12ff1df
YZ
3302static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3303{
3304 u16 ctrl;
3305 struct pci_dev *pdev;
3306
654b75e0 3307 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
c12ff1df
YZ
3308 return -ENOTTY;
3309
3310 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3311 if (pdev != dev)
3312 return -ENOTTY;
3313
3314 if (probe)
3315 return 0;
3316
3317 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3318 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3319 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3320 msleep(100);
3321
3322 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3323 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3324 msleep(100);
3325
3326 return 0;
3327}
3328
977f857c 3329static int __pci_dev_reset(struct pci_dev *dev, int probe)
d91cdc74 3330{
8c1c699f
YZ
3331 int rc;
3332
3333 might_sleep();
3334
b9c3b266
DC
3335 rc = pci_dev_specific_reset(dev, probe);
3336 if (rc != -ENOTTY)
3337 goto done;
3338
8c1c699f
YZ
3339 rc = pcie_flr(dev, probe);
3340 if (rc != -ENOTTY)
3341 goto done;
d91cdc74 3342
8c1c699f 3343 rc = pci_af_flr(dev, probe);
f85876ba
YZ
3344 if (rc != -ENOTTY)
3345 goto done;
3346
3347 rc = pci_pm_reset(dev, probe);
c12ff1df
YZ
3348 if (rc != -ENOTTY)
3349 goto done;
3350
3351 rc = pci_parent_bus_reset(dev, probe);
8c1c699f 3352done:
977f857c
KRW
3353 return rc;
3354}
3355
3356static int pci_dev_reset(struct pci_dev *dev, int probe)
3357{
3358 int rc;
3359
3360 if (!probe) {
3361 pci_cfg_access_lock(dev);
3362 /* block PM suspend, driver probe, etc. */
3363 device_lock(&dev->dev);
3364 }
3365
3366 rc = __pci_dev_reset(dev, probe);
3367
8c1c699f 3368 if (!probe) {
8e9394ce 3369 device_unlock(&dev->dev);
fb51ccbf 3370 pci_cfg_access_unlock(dev);
8c1c699f 3371 }
8c1c699f 3372 return rc;
d91cdc74 3373}
d91cdc74 3374/**
8c1c699f
YZ
3375 * __pci_reset_function - reset a PCI device function
3376 * @dev: PCI device to reset
d91cdc74
SY
3377 *
3378 * Some devices allow an individual function to be reset without affecting
3379 * other functions in the same device. The PCI device must be responsive
3380 * to PCI config space in order to use this function.
3381 *
3382 * The device function is presumed to be unused when this function is called.
3383 * Resetting the device will make the contents of PCI configuration space
3384 * random, so any caller of this must be prepared to reinitialise the
3385 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3386 * etc.
3387 *
8c1c699f 3388 * Returns 0 if the device function was successfully reset or negative if the
d91cdc74
SY
3389 * device doesn't support resetting a single function.
3390 */
8c1c699f 3391int __pci_reset_function(struct pci_dev *dev)
d91cdc74 3392{
8c1c699f 3393 return pci_dev_reset(dev, 0);
d91cdc74 3394}
8c1c699f 3395EXPORT_SYMBOL_GPL(__pci_reset_function);
8dd7f803 3396
6fbf9e7a
KRW
3397/**
3398 * __pci_reset_function_locked - reset a PCI device function while holding
3399 * the @dev mutex lock.
3400 * @dev: PCI device to reset
3401 *
3402 * Some devices allow an individual function to be reset without affecting
3403 * other functions in the same device. The PCI device must be responsive
3404 * to PCI config space in order to use this function.
3405 *
3406 * The device function is presumed to be unused and the caller is holding
3407 * the device mutex lock when this function is called.
3408 * Resetting the device will make the contents of PCI configuration space
3409 * random, so any caller of this must be prepared to reinitialise the
3410 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3411 * etc.
3412 *
3413 * Returns 0 if the device function was successfully reset or negative if the
3414 * device doesn't support resetting a single function.
3415 */
3416int __pci_reset_function_locked(struct pci_dev *dev)
3417{
977f857c 3418 return __pci_dev_reset(dev, 0);
6fbf9e7a
KRW
3419}
3420EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3421
711d5779
MT
3422/**
3423 * pci_probe_reset_function - check whether the device can be safely reset
3424 * @dev: PCI device to reset
3425 *
3426 * Some devices allow an individual function to be reset without affecting
3427 * other functions in the same device. The PCI device must be responsive
3428 * to PCI config space in order to use this function.
3429 *
3430 * Returns 0 if the device function can be reset or negative if the
3431 * device doesn't support resetting a single function.
3432 */
3433int pci_probe_reset_function(struct pci_dev *dev)
3434{
3435 return pci_dev_reset(dev, 1);
3436}
3437
8dd7f803 3438/**
8c1c699f
YZ
3439 * pci_reset_function - quiesce and reset a PCI device function
3440 * @dev: PCI device to reset
8dd7f803
SY
3441 *
3442 * Some devices allow an individual function to be reset without affecting
3443 * other functions in the same device. The PCI device must be responsive
3444 * to PCI config space in order to use this function.
3445 *
3446 * This function does not just reset the PCI portion of a device, but
3447 * clears all the state associated with the device. This function differs
8c1c699f 3448 * from __pci_reset_function in that it saves and restores device state
8dd7f803
SY
3449 * over the reset.
3450 *
8c1c699f 3451 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
3452 * device doesn't support resetting a single function.
3453 */
3454int pci_reset_function(struct pci_dev *dev)
3455{
8c1c699f 3456 int rc;
8dd7f803 3457
8c1c699f
YZ
3458 rc = pci_dev_reset(dev, 1);
3459 if (rc)
3460 return rc;
8dd7f803 3461
8dd7f803
SY
3462 pci_save_state(dev);
3463
8c1c699f
YZ
3464 /*
3465 * both INTx and MSI are disabled after the Interrupt Disable bit
3466 * is set and the Bus Master bit is cleared.
3467 */
8dd7f803
SY
3468 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3469
8c1c699f 3470 rc = pci_dev_reset(dev, 0);
8dd7f803
SY
3471
3472 pci_restore_state(dev);
8dd7f803 3473
8c1c699f 3474 return rc;
8dd7f803
SY
3475}
3476EXPORT_SYMBOL_GPL(pci_reset_function);
3477
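/*
 * Illustrative sketch (not part of the original file): because
 * pci_reset_function() saves and restores config space around the reset,
 * an error handler can use it as a single call; "foo_recover" is a
 * hypothetical name.
 */
static int foo_recover(struct pci_dev *pdev)
{
	int error = pci_reset_function(pdev);

	if (error)
		dev_err(&pdev->dev, "function reset failed: %d\n", error);
	return error;
}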
d556ad4b
PO
3478/**
3479 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3480 * @dev: PCI device to query
3481 *
3482 * Returns mmrbc: maximum designed memory read count in bytes
3483 * or appropriate error value.
3484 */
3485int pcix_get_max_mmrbc(struct pci_dev *dev)
3486{
7c9e2b1c 3487 int cap;
d556ad4b
PO
3488 u32 stat;
3489
3490 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3491 if (!cap)
3492 return -EINVAL;
3493
7c9e2b1c 3494 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
3495 return -EINVAL;
3496
25daeb55 3497 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
3498}
3499EXPORT_SYMBOL(pcix_get_max_mmrbc);
3500
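/*
 * Worked example (added for illustration, not part of the original file):
 * PCI_X_STATUS bits 22:21 hold the designed maximum read size as a
 * power-of-two exponent, so a field value of 0 decodes to 512 << 0 = 512
 * bytes, 1 to 1024, 2 to 2048, and 3 to 4096.
 */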
3501/**
3502 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3503 * @dev: PCI device to query
3504 *
3505 * Returns mmrbc: maximum memory read count in bytes
3506 * or appropriate error value.
3507 */
3508int pcix_get_mmrbc(struct pci_dev *dev)
3509{
7c9e2b1c 3510 int cap;
bdc2bda7 3511 u16 cmd;
d556ad4b
PO
3512
3513 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3514 if (!cap)
3515 return -EINVAL;
3516
7c9e2b1c
DN
3517 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3518 return -EINVAL;
d556ad4b 3519
7c9e2b1c 3520 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
3521}
3522EXPORT_SYMBOL(pcix_get_mmrbc);
3523
3524/**
3525 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3526 * @dev: PCI device to query
3527 * @mmrbc: maximum memory read count in bytes
3528 * valid values are 512, 1024, 2048, 4096
3529 *
3530 * If possible sets maximum memory read byte count; some bridges have errata
3531 * that prevent this.
3532 */
3533int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3534{
7c9e2b1c 3535 int cap;
bdc2bda7
DN
3536 u32 stat, v, o;
3537 u16 cmd;
d556ad4b 3538
229f5afd 3539 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 3540 return -EINVAL;
d556ad4b
PO
3541
3542 v = ffs(mmrbc) - 10;
3543
3544 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3545 if (!cap)
7c9e2b1c 3546 return -EINVAL;
d556ad4b 3547
7c9e2b1c
DN
3548 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3549 return -EINVAL;
d556ad4b
PO
3550
3551 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3552 return -E2BIG;
3553
7c9e2b1c
DN
3554 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3555 return -EINVAL;
d556ad4b
PO
3556
3557 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3558 if (o != v) {
809a3bf9 3559 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
d556ad4b
PO
3560 return -EIO;
3561
3562 cmd &= ~PCI_X_CMD_MAX_READ;
3563 cmd |= v << 2;
7c9e2b1c
DN
3564 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3565 return -EIO;
d556ad4b 3566 }
7c9e2b1c 3567 return 0;
d556ad4b
PO
3568}
3569EXPORT_SYMBOL(pcix_set_mmrbc);
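/*
 * Usage sketch (editor's illustration, not part of pci.c): clamp a
 * requested PCI-X read byte count to the device's designed maximum
 * before applying it. "wanted" must be a power of two in 512..4096;
 * the helper name is hypothetical.
 */
#if 0	/* illustrative only */
static int tune_mmrbc(struct pci_dev *pdev, int wanted)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;		/* not PCI-X capable */
	if (wanted > max)
		wanted = max;
	return pcix_set_mmrbc(pdev, wanted);	/* may fail on buggy bridges */
}
#endif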
3570
3571/**
3572 * pcie_get_readrq - get PCI Express read request size
3573 * @dev: PCI device to query
3574 *
3575 * Returns maximum memory read request in bytes
3576 * or appropriate error value.
3577 */
3578int pcie_get_readrq(struct pci_dev *dev)
3579{
3580 int ret, cap;
3581 u16 ctl;
3582
06a1cbaf 3583 cap = pci_pcie_cap(dev);
d556ad4b
PO
3584 if (!cap)
3585 return -EINVAL;
3586
3587 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3588 if (!ret)
93e75fab 3589 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
3590
3591 return ret;
3592}
3593EXPORT_SYMBOL(pcie_get_readrq);
3594
3595/**
3596 * pcie_set_readrq - set PCI Express maximum memory read request
 3597 * @dev: PCI device to configure
42e61f4a 3598 * @rq: maximum memory read count in bytes
d556ad4b
PO
3599 * valid values are 128, 256, 512, 1024, 2048, 4096
3600 *
c9b378c7 3601 * If possible, sets the maximum memory read request size in bytes.
d556ad4b
PO
3602 */
3603int pcie_set_readrq(struct pci_dev *dev, int rq)
3604{
3605 int cap, err = -EINVAL;
3606 u16 ctl, v;
3607
229f5afd 3608 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
d556ad4b
PO
3609 goto out;
3610
06a1cbaf 3611 cap = pci_pcie_cap(dev);
d556ad4b
PO
3612 if (!cap)
3613 goto out;
3614
3615 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3616 if (err)
3617 goto out;
a1c473aa
BH
3618 /*
3619 * If using the "performance" PCIe config, we clamp the
 3620 * read rq size to the max packet size to prevent the
 3621 * host bridge from generating requests larger than we
 3622 * can cope with.
3623 */
3624 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3625 int mps = pcie_get_mps(dev);
3626
3627 if (mps < 0)
3628 return mps;
3629 if (mps < rq)
3630 rq = mps;
3631 }
3632
3633 v = (ffs(rq) - 8) << 12;
d556ad4b
PO
3634
3635 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3636 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3637 ctl |= v;
c9b378c7 3638 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
d556ad4b
PO
3639 }
3640
3641out:
3642 return err;
3643}
3644EXPORT_SYMBOL(pcie_set_readrq);
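/*
 * Usage sketch (editor's illustration, not part of pci.c): a driver
 * raising the read request size for a DMA-heavy device. Note that in
 * PCIE_BUS_PERFORMANCE mode the value may be clamped to the MPS.
 */
#if 0	/* illustrative only */
	int rrq = pcie_get_readrq(pdev);

	if (rrq > 0 && rrq < 512)
		pcie_set_readrq(pdev, 512);	/* bytes; power of two, 128..4096 */
#endif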
3645
b03e7495
JM
3646/**
3647 * pcie_get_mps - get PCI Express maximum payload size
3648 * @dev: PCI device to query
3649 *
3650 * Returns maximum payload size in bytes
3651 * or appropriate error value.
3652 */
3653int pcie_get_mps(struct pci_dev *dev)
3654{
3655 int ret, cap;
3656 u16 ctl;
3657
3658 cap = pci_pcie_cap(dev);
3659 if (!cap)
3660 return -EINVAL;
3661
3662 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3663 if (!ret)
3664 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3665
3666 return ret;
3667}
3668
3669/**
3670 * pcie_set_mps - set PCI Express maximum payload size
 3671 * @dev: PCI device to configure
47c08f31 3672 * @mps: maximum payload size in bytes
b03e7495
JM
3673 * valid values are 128, 256, 512, 1024, 2048, 4096
3674 *
 3675 * If possible, sets the maximum payload size.
3676 */
3677int pcie_set_mps(struct pci_dev *dev, int mps)
3678{
3679 int cap, err = -EINVAL;
3680 u16 ctl, v;
3681
3682 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3683 goto out;
3684
3685 v = ffs(mps) - 8;
3686 if (v > dev->pcie_mpss)
3687 goto out;
3688 v <<= 5;
3689
3690 cap = pci_pcie_cap(dev);
3691 if (!cap)
3692 goto out;
3693
3694 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3695 if (err)
3696 goto out;
3697
3698 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3699 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3700 ctl |= v;
3701 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3702 }
3703out:
3704 return err;
3705}
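/*
 * Usage sketch (editor's illustration, not part of pci.c): the target
 * MPS must not exceed what the device advertises; dev->pcie_mpss
 * encodes the supported maximum as 128 << pcie_mpss bytes. The
 * 256-byte target below is hypothetical.
 */
#if 0	/* illustrative only */
	int supported = 128 << pdev->pcie_mpss;
	int mps = min(supported, 256);

	if (pcie_set_mps(pdev, mps))
		dev_warn(&pdev->dev, "failed to set MPS to %d\n", mps);
#endif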
3706
c87deff7
HS
3707/**
3708 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 3709 * @dev: the PCI device for which BAR mask is made
c87deff7
HS
3710 * @flags: resource type mask to be selected
3711 *
 3712 * This helper routine makes a BAR mask from the type of resource.
3713 */
3714int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3715{
3716 int i, bars = 0;
3717 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3718 if (pci_resource_flags(dev, i) & flags)
3719 bars |= (1 << i);
3720 return bars;
3721}
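/*
 * Usage sketch (editor's illustration, not part of pci.c):
 * pci_select_bars() is typically paired with
 * pci_request_selected_regions() so a driver claims only the BARs of
 * one resource type; "mydrv" is a hypothetical driver name.
 */
#if 0	/* illustrative only */
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, bars, "mydrv"))
		return -EBUSY;
#endif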
3722
613e7ed6
YZ
3723/**
3724 * pci_resource_bar - get position of the BAR associated with a resource
3725 * @dev: the PCI device
3726 * @resno: the resource number
3727 * @type: the BAR type to be filled in
3728 *
3729 * Returns BAR position in config space, or 0 if the BAR is invalid.
3730 */
3731int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3732{
d1b054da
YZ
3733 int reg;
3734
613e7ed6
YZ
3735 if (resno < PCI_ROM_RESOURCE) {
3736 *type = pci_bar_unknown;
3737 return PCI_BASE_ADDRESS_0 + 4 * resno;
3738 } else if (resno == PCI_ROM_RESOURCE) {
3739 *type = pci_bar_mem32;
3740 return dev->rom_base_reg;
d1b054da
YZ
3741 } else if (resno < PCI_BRIDGE_RESOURCES) {
3742 /* device specific resource */
3743 reg = pci_iov_resource_bar(dev, resno, type);
3744 if (reg)
3745 return reg;
613e7ed6
YZ
3746 }
3747
865df576 3748 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
613e7ed6
YZ
3749 return 0;
3750}
3751
95a8b6ef
MT
3752/* Some architectures require additional programming to enable VGA */
3753static arch_set_vga_state_t arch_set_vga_state;
3754
3755void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3756{
3757 arch_set_vga_state = func; /* NULL disables */
3758}
3759
3760static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
7ad35cf2 3761 unsigned int command_bits, u32 flags)
95a8b6ef
MT
3762{
3763 if (arch_set_vga_state)
3764 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 3765 flags);
95a8b6ef
MT
3766 return 0;
3767}
3768
deb2d2ec
BH
3769/**
3770 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
3771 * @dev: the PCI device
3772 * @decode: true = enable decoding, false = disable decoding
3773 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 3774 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
3448a19d 3775 * the latter traverses ancestors and updates their bridge control as well
deb2d2ec
BH
3776 */
3777int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 3778 unsigned int command_bits, u32 flags)
deb2d2ec
BH
3779{
3780 struct pci_bus *bus;
3781 struct pci_dev *bridge;
3782 u16 cmd;
95a8b6ef 3783 int rc;
deb2d2ec 3784
3448a19d 3785 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 3786
95a8b6ef 3787 /* ARCH specific VGA enables */
3448a19d 3788 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
3789 if (rc)
3790 return rc;
3791
3448a19d
DA
3792 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3793 pci_read_config_word(dev, PCI_COMMAND, &cmd);
 3794 if (decode)
3795 cmd |= command_bits;
3796 else
3797 cmd &= ~command_bits;
3798 pci_write_config_word(dev, PCI_COMMAND, cmd);
3799 }
deb2d2ec 3800
3448a19d 3801 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
3802 return 0;
3803
3804 bus = dev->bus;
3805 while (bus) {
3806 bridge = bus->self;
3807 if (bridge) {
3808 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3809 &cmd);
 3810 if (decode)
3811 cmd |= PCI_BRIDGE_CTL_VGA;
3812 else
3813 cmd &= ~PCI_BRIDGE_CTL_VGA;
3814 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3815 cmd);
3816 }
3817 bus = bus->parent;
3818 }
3819 return 0;
3820}
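/*
 * Usage sketch (editor's illustration, not part of pci.c): the VGA
 * arbiter is the primary caller. Disabling legacy VGA decoding on a
 * device and on the bridges above it could look like this:
 */
#if 0	/* illustrative only */
	pci_set_vga_state(pdev, false,
			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
			  PCI_VGA_STATE_CHANGE_DECODES |
			  PCI_VGA_STATE_CHANGE_BRIDGE);
#endif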
3821
32a9a682
YS
3822#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3823static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 3824static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
3825
3826/**
3827 * pci_specified_resource_alignment - get resource alignment specified by user.
 3828 * @dev: the PCI device to query
3829 *
3830 * RETURNS: Resource alignment if it is specified.
3831 * Zero if it is not specified.
3832 */
3833resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3834{
3835 int seg, bus, slot, func, align_order, count;
3836 resource_size_t align = 0;
3837 char *p;
3838
3839 spin_lock(&resource_alignment_lock);
3840 p = resource_alignment_param;
3841 while (*p) {
3842 count = 0;
3843 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3844 p[count] == '@') {
3845 p += count + 1;
3846 } else {
3847 align_order = -1;
3848 }
3849 if (sscanf(p, "%x:%x:%x.%x%n",
3850 &seg, &bus, &slot, &func, &count) != 4) {
3851 seg = 0;
3852 if (sscanf(p, "%x:%x.%x%n",
3853 &bus, &slot, &func, &count) != 3) {
3854 /* Invalid format */
3855 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3856 p);
3857 break;
3858 }
3859 }
3860 p += count;
3861 if (seg == pci_domain_nr(dev->bus) &&
3862 bus == dev->bus->number &&
3863 slot == PCI_SLOT(dev->devfn) &&
3864 func == PCI_FUNC(dev->devfn)) {
3865 if (align_order == -1) {
3866 align = PAGE_SIZE;
3867 } else {
3868 align = 1 << align_order;
3869 }
3870 /* Found */
3871 break;
3872 }
3873 if (*p != ';' && *p != ',') {
3874 /* End of param or invalid format */
3875 break;
3876 }
3877 p++;
3878 }
3879 spin_unlock(&resource_alignment_lock);
3880 return align;
3881}
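/*
 * Editor's note on the format parsed above: each entry is an optional
 * power-of-two order, '@', and a device address, with entries separated
 * by ';' or ','. For example (addresses are hypothetical):
 *
 *	pci=resource_alignment=12@0000:00:02.0;0000:01:00.0
 *
 * aligns 0000:00:02.0 to 4K (2^12) and 0000:01:00.0 to PAGE_SIZE, the
 * default when no order is given.
 */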
3882
3883/**
 3884 * pci_is_reassigndev - check if the specified PCI device is a reassign target
 3885 * @dev: the PCI device to check
 3886 *
 3887 * RETURNS: non-zero if the PCI device is a target device to reassign;
 3888 * zero otherwise.
3889 */
3890int pci_is_reassigndev(struct pci_dev *dev)
3891{
3892 return (pci_specified_resource_alignment(dev) != 0);
3893}
3894
2069ecfb
YL
3895/*
3896 * This function disables memory decoding and releases memory resources
 3897 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
 3898 * It also rounds up the resource size to the specified alignment.
 3899 * Later on, the kernel will assign page-aligned memory resources back
 3900 * to the device.
3901 */
3902void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3903{
3904 int i;
3905 struct resource *r;
3906 resource_size_t align, size;
3907 u16 command;
3908
3909 if (!pci_is_reassigndev(dev))
3910 return;
3911
3912 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3913 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3914 dev_warn(&dev->dev,
3915 "Can't reassign resources to host bridge.\n");
3916 return;
3917 }
3918
3919 dev_info(&dev->dev,
3920 "Disabling memory decoding and releasing memory resources.\n");
3921 pci_read_config_word(dev, PCI_COMMAND, &command);
3922 command &= ~PCI_COMMAND_MEMORY;
3923 pci_write_config_word(dev, PCI_COMMAND, command);
3924
3925 align = pci_specified_resource_alignment(dev);
3926 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3927 r = &dev->resource[i];
3928 if (!(r->flags & IORESOURCE_MEM))
3929 continue;
3930 size = resource_size(r);
3931 if (size < align) {
3932 size = align;
3933 dev_info(&dev->dev,
3934 "Rounding up size of resource #%d to %#llx.\n",
3935 i, (unsigned long long)size);
3936 }
3937 r->end = size - 1;
3938 r->start = 0;
3939 }
 3940 /* Need to disable the bridge's resource windows
 3941 * so that the kernel can reassign new resource
 3942 * windows later on.
3943 */
3944 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3945 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3946 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3947 r = &dev->resource[i];
3948 if (!(r->flags & IORESOURCE_MEM))
3949 continue;
3950 r->end = resource_size(r) - 1;
3951 r->start = 0;
3952 }
3953 pci_disable_bridge_window(dev);
3954 }
3955}
3956
32a9a682
YS
3957ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3958{
3959 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3960 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3961 spin_lock(&resource_alignment_lock);
3962 strncpy(resource_alignment_param, buf, count);
3963 resource_alignment_param[count] = '\0';
3964 spin_unlock(&resource_alignment_lock);
3965 return count;
3966}
3967
3968ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3969{
3970 size_t count;
3971 spin_lock(&resource_alignment_lock);
3972 count = snprintf(buf, size, "%s", resource_alignment_param);
3973 spin_unlock(&resource_alignment_lock);
3974 return count;
3975}
3976
3977static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3978{
3979 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3980}
3981
3982static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3983 const char *buf, size_t count)
3984{
3985 return pci_set_resource_alignment_param(buf, count);
3986}
3987
3988BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3989 pci_resource_alignment_store);
3990
3991static int __init pci_resource_alignment_sysfs_init(void)
3992{
3993 return bus_create_file(&pci_bus_type,
3994 &bus_attr_resource_alignment);
3995}
3996
3997late_initcall(pci_resource_alignment_sysfs_init);
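/*
 * Editor's note: the attribute created above appears as
 * /sys/bus/pci/resource_alignment; writing the same
 * "[<order>@]<domain>:<bus>:<slot>.<func>" string accepted by the boot
 * parameter updates the alignment list at run time.
 */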
3998
32a2eea7
JG
3999static void __devinit pci_no_domains(void)
4000{
4001#ifdef CONFIG_PCI_DOMAINS
4002 pci_domains_supported = 0;
4003#endif
4004}
4005
0ef5f8f6
AP
4006/**
 4007 * pci_ext_cfg_avail - can we access extended PCI config space?
4008 * @dev: The PCI device of the root bridge.
4009 *
4010 * Returns 1 if we can access PCI extended config space (offsets
4011 * greater than 0xff). This is the default implementation. Architecture
4012 * implementations can override this.
4013 */
d6d88c83 4014int __weak pci_ext_cfg_avail(struct pci_dev *dev)
0ef5f8f6
AP
4015{
4016 return 1;
4017}
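/*
 * Override sketch (editor's illustration, not part of pci.c): an
 * architecture can replace this __weak default, e.g. to refuse extended
 * config space when its mapping is unusable. arch_mmcfg_usable() below
 * is hypothetical.
 */
#if 0	/* illustrative only */
int pci_ext_cfg_avail(struct pci_dev *dev)
{
	return arch_mmcfg_usable() ? 1 : 0;
}
#endif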
4018
2d1c8618
BH
4019void __weak pci_fixup_cardbus(struct pci_bus *bus)
4020{
4021}
4022EXPORT_SYMBOL(pci_fixup_cardbus);
4023
ad04d31e 4024static int __init pci_setup(char *str)
1da177e4
LT
4025{
4026 while (str) {
4027 char *k = strchr(str, ',');
4028 if (k)
4029 *k++ = 0;
4030 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
4031 if (!strcmp(str, "nomsi")) {
4032 pci_no_msi();
7f785763
RD
4033 } else if (!strcmp(str, "noaer")) {
4034 pci_no_aer();
b55438fd
YL
4035 } else if (!strncmp(str, "realloc=", 8)) {
4036 pci_realloc_get_opt(str + 8);
f483d392 4037 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 4038 pci_realloc_get_opt("on");
32a2eea7
JG
4039 } else if (!strcmp(str, "nodomains")) {
4040 pci_no_domains();
6748dcc2
RW
4041 } else if (!strncmp(str, "noari", 5)) {
4042 pcie_ari_disabled = true;
4516a618
AN
4043 } else if (!strncmp(str, "cbiosize=", 9)) {
4044 pci_cardbus_io_size = memparse(str + 9, &str);
4045 } else if (!strncmp(str, "cbmemsize=", 10)) {
4046 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
4047 } else if (!strncmp(str, "resource_alignment=", 19)) {
4048 pci_set_resource_alignment_param(str + 19,
4049 strlen(str + 19));
43c16408
AP
4050 } else if (!strncmp(str, "ecrc=", 5)) {
4051 pcie_ecrc_get_policy(str + 5);
28760489
EB
4052 } else if (!strncmp(str, "hpiosize=", 9)) {
4053 pci_hotplug_io_size = memparse(str + 9, &str);
4054 } else if (!strncmp(str, "hpmemsize=", 10)) {
4055 pci_hotplug_mem_size = memparse(str + 10, &str);
5f39e670
JM
4056 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4057 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
4058 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
4059 pcie_bus_config = PCIE_BUS_SAFE;
4060 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
4061 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
4062 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4063 pcie_bus_config = PCIE_BUS_PEER2PEER;
284f5f9d
BH
4064 } else if (!strncmp(str, "pcie_scan_all", 13)) {
4065 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
309e57df
MW
4066 } else {
4067 printk(KERN_ERR "PCI: Unknown option `%s'\n",
4068 str);
4069 }
1da177e4
LT
4070 }
4071 str = k;
4072 }
0637a70a 4073 return 0;
1da177e4 4074}
0637a70a 4075early_param("pci", pci_setup);
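/*
 * Editor's note: example command lines handled by pci_setup() above
 * (values are illustrative):
 *
 *	pci=nomsi,noaer
 *	pci=hpmemsize=8M,hpiosize=1K
 *	pci=pcie_bus_safe
 *	pci=resource_alignment=12@0000:00:02.0
 */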
1da177e4 4076
0b62e13b 4077EXPORT_SYMBOL(pci_reenable_device);
b718989d
BH
4078EXPORT_SYMBOL(pci_enable_device_io);
4079EXPORT_SYMBOL(pci_enable_device_mem);
1da177e4 4080EXPORT_SYMBOL(pci_enable_device);
9ac7849e
TH
4081EXPORT_SYMBOL(pcim_enable_device);
4082EXPORT_SYMBOL(pcim_pin_device);
1da177e4 4083EXPORT_SYMBOL(pci_disable_device);
1da177e4
LT
4084EXPORT_SYMBOL(pci_find_capability);
4085EXPORT_SYMBOL(pci_bus_find_capability);
4086EXPORT_SYMBOL(pci_release_regions);
4087EXPORT_SYMBOL(pci_request_regions);
e8de1481 4088EXPORT_SYMBOL(pci_request_regions_exclusive);
1da177e4
LT
4089EXPORT_SYMBOL(pci_release_region);
4090EXPORT_SYMBOL(pci_request_region);
e8de1481 4091EXPORT_SYMBOL(pci_request_region_exclusive);
c87deff7
HS
4092EXPORT_SYMBOL(pci_release_selected_regions);
4093EXPORT_SYMBOL(pci_request_selected_regions);
e8de1481 4094EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
1da177e4 4095EXPORT_SYMBOL(pci_set_master);
6a479079 4096EXPORT_SYMBOL(pci_clear_master);
1da177e4 4097EXPORT_SYMBOL(pci_set_mwi);
694625c0 4098EXPORT_SYMBOL(pci_try_set_mwi);
1da177e4 4099EXPORT_SYMBOL(pci_clear_mwi);
a04ce0ff 4100EXPORT_SYMBOL_GPL(pci_intx);
1da177e4
LT
4101EXPORT_SYMBOL(pci_assign_resource);
4102EXPORT_SYMBOL(pci_find_parent_resource);
c87deff7 4103EXPORT_SYMBOL(pci_select_bars);
1da177e4
LT
4104
4105EXPORT_SYMBOL(pci_set_power_state);
4106EXPORT_SYMBOL(pci_save_state);
4107EXPORT_SYMBOL(pci_restore_state);
e5899e1b 4108EXPORT_SYMBOL(pci_pme_capable);
5a6c9b60 4109EXPORT_SYMBOL(pci_pme_active);
0235c4fc 4110EXPORT_SYMBOL(pci_wake_from_d3);
e5899e1b 4111EXPORT_SYMBOL(pci_target_state);
404cc2d8
RW
4112EXPORT_SYMBOL(pci_prepare_to_sleep);
4113EXPORT_SYMBOL(pci_back_from_sleep);
f7bdd12d 4114EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);