/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000        /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3_delay;

        if (delay < pci_pm_d3_delay)
                delay = pci_pm_d3_delay;

        if (delay)
                msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE         (256)
#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE         (256)
#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE        1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
        if (!strcmp(str, "off"))
                pci_bridge_d3_disable = true;
        else if (!strcmp(str, "force"))
                pci_bridge_d3_force = true;
        return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct pci_bus *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each_entry(tmp, &bus->children, node) {
                n = pci_bus_max_busnr(tmp);
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        struct resource *res = &pdev->resource[bar];

        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
                dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
                return NULL;
        }
        return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
        /*
         * Make sure the BAR is actually a memory resource, not an IO resource
         */
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_wc(pci_resource_start(pdev, bar),
                          pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
        u8 id;
        u16 ent;

        pci_bus_read_config_byte(bus, devfn, pos, &pos);

        while ((*ttl)--) {
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_word(bus, devfn, pos, &ent);

                id = ent & 0xff;
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = (ent >> 8);
        }
        return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                               u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
                                    unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        }

        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_find_capability);
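
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * a driver can locate its Power Management capability and read PMCSR
 * through the returned offset.  "pdev" stands for any valid pci_dev.
 *
 *      u16 pmcsr;
 *      int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *
 *      if (pos)
 *              pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
 */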

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        int pos;
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
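
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * locating the Advanced Error Reporting extended capability and reading
 * its Uncorrectable Error Status register.
 *
 *      u32 uncor_status;
 *      int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *      if (pos)
 *              pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS,
 *                                    &uncor_status);
 */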

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        int pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus that the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        struct resource *r;
        int i;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (res->start && resource_contains(r, res)) {

                        /*
                         * If the window is prefetchable but the BAR is
                         * not, the allocator made a mistake.
                         */
                        if (r->flags & IORESOURCE_PREFETCH &&
                            !(res->flags & IORESOURCE_PREFETCH))
                                return NULL;

                        /*
                         * If we're below a transparent bridge, there may
                         * be both a positively-decoded aperture and a
                         * subtractively-decoded region that contain the BAR.
                         * We want the positively-decoded one, so this depends
                         * on pci_bus_for_each_resource() giving us those
                         * first.
                         */
                        return r;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
        int i;

        for (i = 0; i < PCI_ROM_RESOURCE; i++) {
                struct resource *r = &dev->resource[i];

                if (r->start && resource_contains(r, res))
                        return r;
        }

        return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
        struct pci_dev *bridge, *highest_pcie_bridge = NULL;

        bridge = pci_upstream_bridge(dev);
        while (bridge && pci_is_pcie(bridge)) {
                highest_pcie_bridge = bridge;
                bridge = pci_upstream_bridge(bridge);
        }

        /* Guard against devices with no PCIe upstream bridge at all */
        if (!highest_pcie_bridge ||
            pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
                return NULL;

        return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
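
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * a quirk or error handler can look up the Root Port above an endpoint,
 * e.g. to log which Root Port a faulting device sits under.
 *
 *      struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *
 *      if (rp)
 *              dev_info(&pdev->dev, "under root port %s\n", pci_name(rp));
 */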

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
        int i;

        /* Wait for Transaction Pending bit to clear */
        for (i = 0; i < 4; i++) {
                u16 status;
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_word(dev, pos, &status);
                if (!(status & mask))
                        return 1;
        }

        return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible to its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
            !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        /* Validate current state:
         * We can enter D0 from any state, but we can only go deeper
         * to sleep if we're already in a low power state.
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
                        dev->current_state, state);
                return -EINVAL;
        }

        /* check if this device supports the desired state */
        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        /* If we're (effectively) in D3, force entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN: /* Boot-up */
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                /* Fall-through: force to D0 */
        default:
                pmcsr = 0;
                break;
        }

        /* enter specified state */
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays;
         * see PCI PM 1.1, 5.6.1, table 18.
         */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state && printk_ratelimit())
                dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
                         dev->current_state);

        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
         * from D3hot to D0 _may_ perform an internal reset, thereby
         * going to "D0 Uninitialized" rather than "D0 Initialized".
         * For example, at least some versions of the 3c905B and the
         * 3c556B exhibit this behaviour.
         *
         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
         * devices in a D3hot state at boot.  Consequently, we need to
         * restore at least the BARs so that the device will be
         * accessible to its driver.
         */
        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (platform_pci_get_power_state(dev) == PCI_D3cold ||
            !pci_device_is_present(dev)) {
                dev->current_state = PCI_D3cold;
        } else if (dev->pm_cap) {
                u16 pmcsr;

                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_set_power_state(dev, PCI_D0);

        pci_raw_set_power_state(dev, PCI_D0);
        pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
        } else
                error = -ENODEV;

        if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
                dev->current_state = PCI_D0;

        return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);
        return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
        if (state == PCI_D0) {
                pci_platform_power_transition(dev, PCI_D0);
                /*
                 * Mandatory power management transition delays, see
                 * PCI Express Base Specification Revision 2.0 Section
                 * 6.6.1: Conventional Reset.  Do not delay for devices
                 * powered on/off by the corresponding bridge, because
                 * we have already delayed for the bridge.
                 */
                if (dev->runtime_d3cold) {
                        if (dev->d3cold_delay)
                                msleep(dev->d3cold_delay);
                        /*
                         * When powering on a bridge from D3cold, the
                         * whole hierarchy may be powered on into the
                         * D0uninitialized state; resume the devices
                         * below to give them a chance to suspend again.
                         */
                        pci_wakeup_bus(dev->subordinate);
                }
        }
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int ret;

        if (state <= PCI_D0)
                return -EINVAL;
        ret = pci_platform_power_transition(dev, state);
        /* Powering off the bridge may power off the whole hierarchy */
        if (!ret && state == PCI_D3cold)
                __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
        return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                /*
                 * If the device or the parent bridge do not support PCI PM,
                 * ignore the request if we're doing anything other than putting
                 * it into D0 (which would only happen on boot).
                 */
                return 0;

        /* Check if we're already there */
        if (dev->current_state == state)
                return 0;

        __pci_start_power_transition(dev, state);

        /* This device is quirked not to be put into D3, so don't put it in D3 */
        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        /*
         * To put the device in D3cold, we put the device into D3hot in the
         * native way, then put the device into D3cold with platform ops.
         */
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (!__pci_complete_power_transition(dev, state))
                error = 0;

        return error;
}
EXPORT_SYMBOL(pci_set_power_state);
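
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * a suspend path typically saves config space before requesting D3hot.
 *
 *      pci_save_state(pdev);
 *      pci_set_power_state(pdev, PCI_D3hot);
 */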

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!dev->pm_cap)
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
                /* REVISIT both freeze and pre-thaw "should" use D0 */
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                dev_info(&dev->dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
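
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * legacy (non-dev_pm_ops) drivers commonly combine pci_choose_state()
 * with pci_set_power_state() in their suspend callback; "my_suspend" is
 * a hypothetical name.
 *
 *      static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *      {
 *              pci_save_state(pdev);
 *              pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *              return 0;
 *      }
 */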

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
                                                       u16 cap, bool extended)
{
        struct pci_cap_saved_state *tmp;

        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
        return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
        return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || !pos)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        dev->state_saved = true;

        i = pci_save_pcie_state(dev);
        if (i != 0)
                return i;

        i = pci_save_pcix_state(dev);
        if (i != 0)
                return i;

        return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
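
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * pci_save_state() in a suspend handler is normally paired with
 * pci_restore_state() in the matching resume handler; "my_resume" is a
 * hypothetical name.
 *
 *      static int my_resume(struct pci_dev *pdev)
 *      {
 *              pci_set_power_state(pdev, PCI_D0);
 *              pci_restore_state(pdev);
 *              return 0;
 *      }
 */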

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
                        offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0);
                /* Restore BARs before the command register. */
                pci_restore_config_space_range(pdev, 4, 9, 10);
                pci_restore_config_space_range(pdev, 0, 3, 0);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0);
        }
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        /* PCI Express register must be restored first */
        pci_restore_pcie_state(dev);
        pci_restore_ats_state(dev);
        pci_restore_vc_state(dev);

        pci_cleanup_aer_error_status_regs(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);

        /* Restore ACS and IOV configuration state */
        pci_enable_acs(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
                         struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *                                 and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
        return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;
        struct pci_dev *bridge;
        u16 cmd;
        u8 pin;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pcie_aspm_powersave_config_link(bridge);

        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        if (dev->msi_enabled || dev->msix_enabled)
                return 0;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (pin) {
                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (cmd & PCI_COMMAND_INTX_DISABLE)
                        pci_write_config_word(dev, PCI_COMMAND,
                                              cmd & ~PCI_COMMAND_INTX_DISABLE);
        }

        return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
        struct pci_dev *bridge;
        int retval;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        if (pci_is_enabled(dev)) {
                if (!dev->is_busmaster)
                        pci_set_master(dev);
                return;
        }

        retval = pci_enable_device(dev);
        if (retval)
                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
                        retval);
        pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        struct pci_dev *bridge;
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
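
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * the canonical unmanaged probe sequence enables the device, requests
 * its regions and turns on bus mastering; "my_probe" and "my_drv" are
 * hypothetical names.
 *
 *      static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *      {
 *              int err;
 *
 *              err = pci_enable_device(pdev);
 *              if (err)
 *                      return err;
 *              err = pci_request_regions(pdev, "my_drv");
 *              if (err) {
 *                      pci_disable_device(pdev);
 *                      return err;
 *              }
 *              pci_set_master(pdev);
 *              return 0;
 *      }
 */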

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = to_pci_dev(gendev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
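
/*
 * Usage example (an illustrative sketch, not part of the original file):
 * with the managed variant no explicit pci_disable_device() is needed;
 * devres runs pcim_release() on probe failure and on driver detach.
 * "my_probe" is a hypothetical name.
 *
 *      static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *      {
 *              int err = pcim_enable_device(pdev);
 *
 *              if (err)
 *                      return err;
 *              return 0;
 *      }
 */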

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
        return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is no longer in use.
 * This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
                      "disabling already-disabled device");

        if (atomic_dec_return(&dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        /* Clear PME status. */
        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                /* Disable PME to avoid interrupt flood. */
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                if (pme_dev->dev->pme_poll) {
                        struct pci_dev *bridge;

                        bridge = pme_dev->dev->bus->self;
                        /*
                         * If the bridge is in a low power state, the
                         * configuration space of subordinate devices
                         * may not be accessible.
                         */
                        if (bridge && bridge->current_state != PCI_D0)
                                continue;
                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        list_del(&pme_dev->list);
                        kfree(pme_dev);
                }
        }
        if (!list_empty(&pci_pme_list))
                queue_delayed_work(system_freezable_wq, &pci_pme_work,
                                   msecs_to_jiffies(PME_TIMEOUT));
        mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
        u16 pmcsr;

        if (!dev->pme_support)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        /* Clear PME_Status by writing 1 to it and enable PME# */
        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
        if (!enable)
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

static void pci_pme_restore(struct pci_dev *dev)
{
        u16 pmcsr;

        if (!dev->pme_support)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (dev->wakeup_prepared) {
                pmcsr |= PCI_PM_CTRL_PME_ENABLE;
        } else {
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                pmcsr |= PCI_PM_CTRL_PME_STATUS;
        }
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
        __pci_pme_active(dev, enable);

        /*
         * PCI (as opposed to PCIe) PME requires that the device have
         * its PME# line hooked up correctly.  Not all hardware vendors
         * do this, so the PME never gets delivered and the device
         * remains asleep.  The easiest way around this is to
         * periodically walk the list of suspended devices and check
         * whether any have their PME flag set.  The assumption is that
         * we'll wake up often enough anyway that this won't be a huge
         * hit, and the power savings from the devices will still be a
         * win.
         *
         * Although PCIe uses an in-band PME message instead of the PME#
         * line to report PME, PME does not work for some PCIe devices
         * in reality.  For example, there are devices that set their
         * PME status bits, but don't really bother to send a PME
         * message; there are PCI Express Root Ports that don't bother
         * to trigger interrupts when they receive PME messages from
         * the devices below.  So PME poll is used for PCIe devices too.
         */

        if (dev->pme_poll) {
                struct pci_pme_device *pme_dev;
                if (enable) {
                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
                                          GFP_KERNEL);
                        if (!pme_dev) {
                                dev_warn(&dev->dev, "can't enable PME#\n");
                                return;
                        }
                        pme_dev->dev = dev;
                        mutex_lock(&pci_pme_list_mutex);
                        list_add(&pme_dev->list, &pci_pme_list);
                        if (list_is_singular(&pci_pme_list))
                                queue_delayed_work(system_freezable_wq,
                                                   &pci_pme_work,
                                                   msecs_to_jiffies(PME_TIMEOUT));
                        mutex_unlock(&pci_pme_list_mutex);
                } else {
                        mutex_lock(&pci_pme_list_mutex);
                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
                                if (pme_dev->dev == dev) {
                                        list_del(&pme_dev->list);
                                        kfree(pme_dev);
                                        break;
                                }
                        }
                        mutex_unlock(&pci_pme_list_mutex);
                }
        }

        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);
1883
1884 /**
1885 * pci_enable_wake - enable PCI device as wakeup event source
1886 * @dev: PCI device affected
1887 * @state: PCI state from which device will issue wakeup events
1888 * @enable: True to enable event generation; false to disable
1889 *
1890 * This enables the device as a wakeup event source, or disables it.
1891 * When such events involve platform-specific hooks, those hooks are
1892 * called automatically by this routine.
1893 *
1894 * Devices with legacy power management (no standard PCI PM capabilities)
1895 * always require such platform hooks.
1896 *
1897 * RETURN VALUE:
1898 * 0 is returned on success
1899 * -EINVAL is returned if the device is not supposed to wake up the system
1900 * A platform-dependent error code is returned if both the platform and
1901 * the native mechanism fail to enable the generation of wake-up events
1902 */
1903 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1904 {
1905 int ret = 0;
1906
1907 /*
1908 * Don't do the same thing twice in a row for one device, but restore
1909 * PME Enable in case it has been updated by config space restoration.
1910 */
1911 if (!!enable == !!dev->wakeup_prepared) {
1912 pci_pme_restore(dev);
1913 return 0;
1914 }
1915
1916 /*
1917 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1918 * Anderson we should be doing PME# wake enable followed by ACPI wake
1919 * enable. To disable wake-up we call the platform first, for symmetry.
1920 */
1921
1922 if (enable) {
1923 int error;
1924
1925 if (pci_pme_capable(dev, state))
1926 pci_pme_active(dev, true);
1927 else
1928 ret = 1;
1929 error = platform_pci_set_wakeup(dev, true);
1930 if (ret)
1931 ret = error;
1932 if (!ret)
1933 dev->wakeup_prepared = true;
1934 } else {
1935 platform_pci_set_wakeup(dev, false);
1936 pci_pme_active(dev, false);
1937 dev->wakeup_prepared = false;
1938 }
1939
1940 return ret;
1941 }
1942 EXPORT_SYMBOL(pci_enable_wake);
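
/*
 * Usage sketch (illustrative only; "pdev" and the suspend/resume hooks
 * are assumed): a driver typically arms wakeup on the way into suspend
 * and disarms it symmetrically on resume, e.g.
 *
 *	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *	...
 *	pci_enable_wake(pdev, PCI_D0, false);
 */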
1943
1944 /**
1945 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1946 * @dev: PCI device to prepare
1947 * @enable: True to enable wake-up event generation; false to disable
1948 *
1949 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1950 * and this function allows them to set that up cleanly - pci_enable_wake()
1951 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1952 * ordering constraints.
1953 *
1954 * This function only returns error code if the device is not capable of
1955 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1956 * enable wake-up power for it.
1957 */
1958 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1959 {
1960 return pci_pme_capable(dev, PCI_D3cold) ?
1961 pci_enable_wake(dev, PCI_D3cold, enable) :
1962 pci_enable_wake(dev, PCI_D3hot, enable);
1963 }
1964 EXPORT_SYMBOL(pci_wake_from_d3);
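
/*
 * Usage sketch (illustrative; "pdev" and the Wake-on-LAN flag "wol" are
 * assumptions): network drivers commonly call this from their suspend
 * path before entering a low-power state, e.g.
 *
 *	pci_wake_from_d3(pdev, wol);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */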
1965
1966 /**
1967 * pci_target_state - find an appropriate low power state for a given PCI dev
1968 * @dev: PCI device
1969 *
1970 * Use underlying platform code to find a supported low power state for @dev.
1971 * If the platform can't manage @dev, return the deepest state from which it
1972 * can generate wake events, based on any available PME info.
1973 */
1974 static pci_power_t pci_target_state(struct pci_dev *dev)
1975 {
1976 pci_power_t target_state = PCI_D3hot;
1977
1978 if (platform_pci_power_manageable(dev)) {
1979 /*
1980 * Call the platform to choose the target state of the device
1981 * and enable wake-up from this state if supported.
1982 */
1983 pci_power_t state = platform_pci_choose_state(dev);
1984
1985 switch (state) {
1986 case PCI_POWER_ERROR:
1987 case PCI_UNKNOWN:
1988 break;
1989 case PCI_D1:
1990 case PCI_D2:
1991 if (pci_no_d1d2(dev))
1992 break;
1993 default:
1994 target_state = state;
1995 }
1996
1997 return target_state;
1998 }
1999
2000 if (!dev->pm_cap)
2001 target_state = PCI_D0;
2002
2003 /*
2004 * If the device is in D3cold even though it's not power-manageable by
2005 * the platform, it may have been powered down by non-standard means.
2006 * Best to let it slumber.
2007 */
2008 if (dev->current_state == PCI_D3cold)
2009 target_state = PCI_D3cold;
2010
2011 if (device_may_wakeup(&dev->dev)) {
2012 /*
2013 * Find the deepest state from which the device can generate
2014 * wake-up events, make it the target state and enable device
2015 * to generate PME#.
2016 */
2017 if (dev->pme_support) {
2018 while (target_state
2019 && !(dev->pme_support & (1 << target_state)))
2020 target_state--;
2021 }
2022 }
2023
2024 return target_state;
2025 }
2026
2027 /**
2028 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2029 * @dev: Device to handle.
2030 *
2031 * Choose the power state appropriate for the device depending on whether
2032 * it can wake up the system and/or is power manageable by the platform
2033 * (PCI_D3hot is the default) and put the device into that state.
2034 */
2035 int pci_prepare_to_sleep(struct pci_dev *dev)
2036 {
2037 pci_power_t target_state = pci_target_state(dev);
2038 int error;
2039
2040 if (target_state == PCI_POWER_ERROR)
2041 return -EIO;
2042
2043 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
2044
2045 error = pci_set_power_state(dev, target_state);
2046
2047 if (error)
2048 pci_enable_wake(dev, target_state, false);
2049
2050 return error;
2051 }
2052 EXPORT_SYMBOL(pci_prepare_to_sleep);
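
/*
 * Usage sketch (illustrative; error handling abbreviated): a driver with
 * no special target-state requirements can defer the choice of sleep
 * state to the PCI core from its suspend hook, e.g.
 *
 *	pci_save_state(pdev);
 *	return pci_prepare_to_sleep(pdev);
 */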
2053
2054 /**
2055 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
2056 * @dev: Device to handle.
2057 *
2058 * Disable device's system wake-up capability and put it into D0.
2059 */
2060 int pci_back_from_sleep(struct pci_dev *dev)
2061 {
2062 pci_enable_wake(dev, PCI_D0, false);
2063 return pci_set_power_state(dev, PCI_D0);
2064 }
2065 EXPORT_SYMBOL(pci_back_from_sleep);
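
/*
 * Usage sketch (illustrative): the resume-side counterpart of
 * pci_prepare_to_sleep(), e.g.
 *
 *	pci_back_from_sleep(pdev);
 *	pci_restore_state(pdev);
 */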
2066
2067 /**
2068 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2069 * @dev: PCI device being suspended.
2070 *
2071 * Prepare @dev to generate wake-up events at run time and put it into a low
2072 * power state.
2073 */
2074 int pci_finish_runtime_suspend(struct pci_dev *dev)
2075 {
2076 pci_power_t target_state = pci_target_state(dev);
2077 int error;
2078
2079 if (target_state == PCI_POWER_ERROR)
2080 return -EIO;
2081
2082 dev->runtime_d3cold = target_state == PCI_D3cold;
2083
2084 pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2085
2086 error = pci_set_power_state(dev, target_state);
2087
2088 if (error) {
2089 pci_enable_wake(dev, target_state, false);
2090 dev->runtime_d3cold = false;
2091 }
2092
2093 return error;
2094 }
2095
2096 /**
2097 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2098 * @dev: Device to check.
2099 *
2100 * Return true if the device itself is capable of generating wake-up events
2101 * (through the platform or using the native PCIe PME) or if the device supports
2102 * PME and one of its upstream bridges can generate wake-up events.
2103 */
2104 bool pci_dev_run_wake(struct pci_dev *dev)
2105 {
2106 struct pci_bus *bus = dev->bus;
2107
2108 if (device_can_wakeup(&dev->dev))
2109 return true;
2110
2111 if (!dev->pme_support)
2112 return false;
2113
2114 /* PME-capable in principle, but not from the intended sleep state */
2115 if (!pci_pme_capable(dev, pci_target_state(dev)))
2116 return false;
2117
2118 while (bus->parent) {
2119 struct pci_dev *bridge = bus->self;
2120
2121 if (device_can_wakeup(&bridge->dev))
2122 return true;
2123
2124 bus = bus->parent;
2125 }
2126
2127 /* We have reached the root bus. */
2128 if (bus->bridge)
2129 return device_can_wakeup(bus->bridge);
2130
2131 return false;
2132 }
2133 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
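
/*
 * Usage sketch (illustrative): drivers can gate runtime PM on this
 * check so a device that cannot signal wakeup is not runtime-suspended
 * behind the user's back, e.g.
 *
 *	if (!pci_dev_run_wake(pdev))
 *		pm_runtime_forbid(&pdev->dev);
 */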
2134
2135 /**
2136 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2137 * @pci_dev: Device to check.
2138 *
2139 * Return 'true' if the device is runtime-suspended, does not have to be
2140 * reconfigured due to a difference in wakeup settings between system and
2141 * runtime suspend, and its current power state is suitable for the upcoming
2142 * (system) transition.
2143 *
2144 * If the device is not configured for system wakeup, disable PME for it before
2145 * returning 'true' to prevent it from waking up the system unnecessarily.
2146 */
2147 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2148 {
2149 struct device *dev = &pci_dev->dev;
2150
2151 if (!pm_runtime_suspended(dev)
2152 || pci_target_state(pci_dev) != pci_dev->current_state
2153 || platform_pci_need_resume(pci_dev)
2154 || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2155 return false;
2156
2157 /*
2158 * At this point the device is good to go unless it's been configured
2159 * to generate PME at runtime suspend time, but it is not supposed
2160 * to wake up the system. In that case, simply disable PME for it
2161 * (it will have to be re-enabled on exit from system resume).
2162 *
2163 * If the device's power state is D3cold and the platform check above
2164 * hasn't triggered, the device's configuration is suitable and we don't
2165 * need to manipulate it at all.
2166 */
2167 spin_lock_irq(&dev->power.lock);
2168
2169 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2170 !device_may_wakeup(dev))
2171 __pci_pme_active(pci_dev, false);
2172
2173 spin_unlock_irq(&dev->power.lock);
2174 return true;
2175 }
2176
2177 /**
2178 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2179 * @pci_dev: Device to handle.
2180 *
2181 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2182 * it might have been disabled during the prepare phase of system suspend if
2183 * the device was not configured for system wakeup.
2184 */
2185 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2186 {
2187 struct device *dev = &pci_dev->dev;
2188
2189 if (!pci_dev_run_wake(pci_dev))
2190 return;
2191
2192 spin_lock_irq(&dev->power.lock);
2193
2194 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2195 __pci_pme_active(pci_dev, true);
2196
2197 spin_unlock_irq(&dev->power.lock);
2198 }
2199
2200 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2201 {
2202 struct device *dev = &pdev->dev;
2203 struct device *parent = dev->parent;
2204
2205 if (parent)
2206 pm_runtime_get_sync(parent);
2207 pm_runtime_get_noresume(dev);
2208 /*
2209 * pdev->current_state is set to PCI_D3cold during suspending,
2210 * so wait until suspending completes
2211 */
2212 pm_runtime_barrier(dev);
2213 /*
2214 * Only need to resume devices in D3cold, because config
2215 * registers are still accessible for devices suspended but
2216 * not in D3cold.
2217 */
2218 if (pdev->current_state == PCI_D3cold)
2219 pm_runtime_resume(dev);
2220 }
2221
2222 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2223 {
2224 struct device *dev = &pdev->dev;
2225 struct device *parent = dev->parent;
2226
2227 pm_runtime_put(dev);
2228 if (parent)
2229 pm_runtime_put_sync(parent);
2230 }
2231
2232 /**
2233 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2234 * @bridge: Bridge to check
2235 *
2236 * This function checks if it is possible to move the bridge to D3.
2237 * Currently we only allow D3 for recent enough PCIe ports.
2238 */
2239 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2240 {
2241 unsigned int year;
2242
2243 if (!pci_is_pcie(bridge))
2244 return false;
2245
2246 switch (pci_pcie_type(bridge)) {
2247 case PCI_EXP_TYPE_ROOT_PORT:
2248 case PCI_EXP_TYPE_UPSTREAM:
2249 case PCI_EXP_TYPE_DOWNSTREAM:
2250 if (pci_bridge_d3_disable)
2251 return false;
2252
2253 /*
2254 * Hotplug interrupts cannot be delivered if the link is down,
2255 * so parents of a hotplug port must stay awake. In addition,
2256 * hotplug ports handled by firmware in System Management Mode
2257 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2258 * For simplicity, disallow in general for now.
2259 */
2260 if (bridge->is_hotplug_bridge)
2261 return false;
2262
2263 if (pci_bridge_d3_force)
2264 return true;
2265
2266 /*
2267 * It should be safe to put PCIe ports from 2015 or newer
2268 * to D3.
2269 */
2270 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2271 year >= 2015) {
2272 return true;
2273 }
2274 break;
2275 }
2276
2277 return false;
2278 }
2279
2280 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2281 {
2282 bool *d3cold_ok = data;
2283
2284 if (/* The device needs to be allowed to go D3cold ... */
2285 dev->no_d3cold || !dev->d3cold_allowed ||
2286
2287 /* ... and if it is wakeup capable to do so from D3cold. */
2288 (device_may_wakeup(&dev->dev) &&
2289 !pci_pme_capable(dev, PCI_D3cold)) ||
2290
2291 /* If it is a bridge it must be allowed to go to D3. */
2292 !pci_power_manageable(dev))
2293
2294 *d3cold_ok = false;
2295
2296 return !*d3cold_ok;
2297 }
2298
2299 /*
2300 * pci_bridge_d3_update - Update bridge D3 capabilities
2301 * @dev: PCI device which is changed
2302 *
2303 * Update upstream bridge PM capabilities depending on whether the
2304 * device PM configuration was changed or the device is being removed. The
2305 * change is also propagated upstream.
2306 */
2307 void pci_bridge_d3_update(struct pci_dev *dev)
2308 {
2309 bool remove = !device_is_registered(&dev->dev);
2310 struct pci_dev *bridge;
2311 bool d3cold_ok = true;
2312
2313 bridge = pci_upstream_bridge(dev);
2314 if (!bridge || !pci_bridge_d3_possible(bridge))
2315 return;
2316
2317 /*
2318 * If D3 is currently allowed for the bridge, removing one of its
2319 * children won't change that.
2320 */
2321 if (remove && bridge->bridge_d3)
2322 return;
2323
2324 /*
2325 * If D3 is currently allowed for the bridge and a child is added or
2326 * changed, disallowance of D3 can only be caused by that child, so
2327 * we only need to check that single device, not any of its siblings.
2328 *
2329 * If D3 is currently not allowed for the bridge, checking the device
2330 * first may allow us to skip checking its siblings.
2331 */
2332 if (!remove)
2333 pci_dev_check_d3cold(dev, &d3cold_ok);
2334
2335 /*
2336 * If D3 is currently not allowed for the bridge, this may be caused
2337 * either by the device being changed/removed or any of its siblings,
2338 * so we need to go through all children to find out if one of them
2339 * continues to block D3.
2340 */
2341 if (d3cold_ok && !bridge->bridge_d3)
2342 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2343 &d3cold_ok);
2344
2345 if (bridge->bridge_d3 != d3cold_ok) {
2346 bridge->bridge_d3 = d3cold_ok;
2347 /* Propagate change to upstream bridges */
2348 pci_bridge_d3_update(bridge);
2349 }
2350 }
2351
2352 /**
2353 * pci_d3cold_enable - Enable D3cold for device
2354 * @dev: PCI device to handle
2355 *
2356 * This function can be used in drivers to enable D3cold from the device
2357 * they handle. It also updates upstream PCI bridge PM capabilities
2358 * accordingly.
2359 */
2360 void pci_d3cold_enable(struct pci_dev *dev)
2361 {
2362 if (dev->no_d3cold) {
2363 dev->no_d3cold = false;
2364 pci_bridge_d3_update(dev);
2365 }
2366 }
2367 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2368
2369 /**
2370 * pci_d3cold_disable - Disable D3cold for device
2371 * @dev: PCI device to handle
2372 *
2373 * This function can be used in drivers to disable D3cold from the device
2374 * they handle. It also updates upstream PCI bridge PM capabilities
2375 * accordingly.
2376 */
2377 void pci_d3cold_disable(struct pci_dev *dev)
2378 {
2379 if (!dev->no_d3cold) {
2380 dev->no_d3cold = true;
2381 pci_bridge_d3_update(dev);
2382 }
2383 }
2384 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
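
/*
 * Usage sketch (illustrative): a driver whose device misbehaves after
 * D3cold can opt out at probe time and opt back in on remove, e.g.
 *
 *	pci_d3cold_disable(pdev);	(in probe)
 *	pci_d3cold_enable(pdev);	(in remove)
 */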
2385
2386 /**
2387 * pci_pm_init - Initialize PM functions of given PCI device
2388 * @dev: PCI device to handle.
2389 */
2390 void pci_pm_init(struct pci_dev *dev)
2391 {
2392 int pm;
2393 u16 pmc;
2394
2395 pm_runtime_forbid(&dev->dev);
2396 pm_runtime_set_active(&dev->dev);
2397 pm_runtime_enable(&dev->dev);
2398 device_enable_async_suspend(&dev->dev);
2399 dev->wakeup_prepared = false;
2400
2401 dev->pm_cap = 0;
2402 dev->pme_support = 0;
2403
2404 /* find PCI PM capability in list */
2405 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2406 if (!pm)
2407 return;
2408 /* Check device's ability to generate PME# */
2409 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2410
2411 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2412 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2413 pmc & PCI_PM_CAP_VER_MASK);
2414 return;
2415 }
2416
2417 dev->pm_cap = pm;
2418 dev->d3_delay = PCI_PM_D3_WAIT;
2419 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2420 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2421 dev->d3cold_allowed = true;
2422
2423 dev->d1_support = false;
2424 dev->d2_support = false;
2425 if (!pci_no_d1d2(dev)) {
2426 if (pmc & PCI_PM_CAP_D1)
2427 dev->d1_support = true;
2428 if (pmc & PCI_PM_CAP_D2)
2429 dev->d2_support = true;
2430
2431 if (dev->d1_support || dev->d2_support)
2432 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2433 dev->d1_support ? " D1" : "",
2434 dev->d2_support ? " D2" : "");
2435 }
2436
2437 pmc &= PCI_PM_CAP_PME_MASK;
2438 if (pmc) {
2439 dev_printk(KERN_DEBUG, &dev->dev,
2440 "PME# supported from%s%s%s%s%s\n",
2441 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2442 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2443 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2444 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2445 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2446 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2447 dev->pme_poll = true;
2448 /*
2449 * Make device's PM flags reflect the wake-up capability, but
2450 * let the user space enable it to wake up the system as needed.
2451 */
2452 device_set_wakeup_capable(&dev->dev, true);
2453 /* Disable the PME# generation functionality */
2454 pci_pme_active(dev, false);
2455 }
2456 }
2457
2458 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2459 {
2460 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2461
2462 switch (prop) {
2463 case PCI_EA_P_MEM:
2464 case PCI_EA_P_VF_MEM:
2465 flags |= IORESOURCE_MEM;
2466 break;
2467 case PCI_EA_P_MEM_PREFETCH:
2468 case PCI_EA_P_VF_MEM_PREFETCH:
2469 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2470 break;
2471 case PCI_EA_P_IO:
2472 flags |= IORESOURCE_IO;
2473 break;
2474 default:
2475 return 0;
2476 }
2477
2478 return flags;
2479 }
2480
2481 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2482 u8 prop)
2483 {
2484 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2485 return &dev->resource[bei];
2486 #ifdef CONFIG_PCI_IOV
2487 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2488 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2489 return &dev->resource[PCI_IOV_RESOURCES +
2490 bei - PCI_EA_BEI_VF_BAR0];
2491 #endif
2492 else if (bei == PCI_EA_BEI_ROM)
2493 return &dev->resource[PCI_ROM_RESOURCE];
2494 else
2495 return NULL;
2496 }
2497
2498 /* Read an Enhanced Allocation (EA) entry */
2499 static int pci_ea_read(struct pci_dev *dev, int offset)
2500 {
2501 struct resource *res;
2502 int ent_size, ent_offset = offset;
2503 resource_size_t start, end;
2504 unsigned long flags;
2505 u32 dw0, bei, base, max_offset;
2506 u8 prop;
2507 bool support_64 = (sizeof(resource_size_t) >= 8);
2508
2509 pci_read_config_dword(dev, ent_offset, &dw0);
2510 ent_offset += 4;
2511
2512 /* Entry size field indicates DWORDs after 1st */
2513 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2514
2515 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2516 goto out;
2517
2518 bei = (dw0 & PCI_EA_BEI) >> 4;
2519 prop = (dw0 & PCI_EA_PP) >> 8;
2520
2521 /*
2522 * If the Property is in the reserved range, try the Secondary
2523 * Property instead.
2524 */
2525 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2526 prop = (dw0 & PCI_EA_SP) >> 16;
2527 if (prop > PCI_EA_P_BRIDGE_IO)
2528 goto out;
2529
2530 res = pci_ea_get_resource(dev, bei, prop);
2531 if (!res) {
2532 dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
2533 goto out;
2534 }
2535
2536 flags = pci_ea_flags(dev, prop);
2537 if (!flags) {
2538 dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
2539 goto out;
2540 }
2541
2542 /* Read Base */
2543 pci_read_config_dword(dev, ent_offset, &base);
2544 start = (base & PCI_EA_FIELD_MASK);
2545 ent_offset += 4;
2546
2547 /* Read MaxOffset */
2548 pci_read_config_dword(dev, ent_offset, &max_offset);
2549 ent_offset += 4;
2550
2551 /* Read Base MSBs (if 64-bit entry) */
2552 if (base & PCI_EA_IS_64) {
2553 u32 base_upper;
2554
2555 pci_read_config_dword(dev, ent_offset, &base_upper);
2556 ent_offset += 4;
2557
2558 flags |= IORESOURCE_MEM_64;
2559
2560 /* entry starts above 32-bit boundary, can't use */
2561 if (!support_64 && base_upper)
2562 goto out;
2563
2564 if (support_64)
2565 start |= ((u64)base_upper << 32);
2566 }
2567
2568 end = start + (max_offset | 0x03);
2569
2570 /* Read MaxOffset MSBs (if 64-bit entry) */
2571 if (max_offset & PCI_EA_IS_64) {
2572 u32 max_offset_upper;
2573
2574 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2575 ent_offset += 4;
2576
2577 flags |= IORESOURCE_MEM_64;
2578
2579 /* entry too big, can't use */
2580 if (!support_64 && max_offset_upper)
2581 goto out;
2582
2583 if (support_64)
2584 end += ((u64)max_offset_upper << 32);
2585 }
2586
2587 if (end < start) {
2588 dev_err(&dev->dev, "EA Entry crosses address boundary\n");
2589 goto out;
2590 }
2591
2592 if (ent_size != ent_offset - offset) {
2593 dev_err(&dev->dev,
2594 "EA Entry Size (%d) does not match length read (%d)\n",
2595 ent_size, ent_offset - offset);
2596 goto out;
2597 }
2598
2599 res->name = pci_name(dev);
2600 res->start = start;
2601 res->end = end;
2602 res->flags = flags;
2603
2604 if (bei <= PCI_EA_BEI_BAR5)
2605 dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2606 bei, res, prop);
2607 else if (bei == PCI_EA_BEI_ROM)
2608 dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2609 res, prop);
2610 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2611 dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2612 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2613 else
2614 dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2615 bei, res, prop);
2616
2617 out:
2618 return offset + ent_size;
2619 }
2620
2621 /* Enhanced Allocation Initialization */
2622 void pci_ea_init(struct pci_dev *dev)
2623 {
2624 int ea;
2625 u8 num_ent;
2626 int offset;
2627 int i;
2628
2629 /* find PCI EA capability in list */
2630 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2631 if (!ea)
2632 return;
2633
2634 /* determine the number of entries */
2635 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2636 &num_ent);
2637 num_ent &= PCI_EA_NUM_ENT_MASK;
2638
2639 offset = ea + PCI_EA_FIRST_ENT;
2640
2641 /* Skip DWORD 2 for type 1 functions */
2642 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2643 offset += 4;
2644
2645 /* parse each EA entry */
2646 for (i = 0; i < num_ent; ++i)
2647 offset = pci_ea_read(dev, offset);
2648 }
2649
2650 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2651 struct pci_cap_saved_state *new_cap)
2652 {
2653 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2654 }
2655
2656 /**
2657 * _pci_add_cap_save_buffer - allocate buffer for saving given
2658 * capability registers
2659 * @dev: the PCI device
2660 * @cap: the capability to allocate the buffer for
2661 * @extended: Standard or Extended capability ID
2662 * @size: requested size of the buffer
2663 */
2664 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2665 bool extended, unsigned int size)
2666 {
2667 int pos;
2668 struct pci_cap_saved_state *save_state;
2669
2670 if (extended)
2671 pos = pci_find_ext_capability(dev, cap);
2672 else
2673 pos = pci_find_capability(dev, cap);
2674
2675 if (!pos)
2676 return 0;
2677
2678 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2679 if (!save_state)
2680 return -ENOMEM;
2681
2682 save_state->cap.cap_nr = cap;
2683 save_state->cap.cap_extended = extended;
2684 save_state->cap.size = size;
2685 pci_add_saved_cap(dev, save_state);
2686
2687 return 0;
2688 }
2689
2690 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2691 {
2692 return _pci_add_cap_save_buffer(dev, cap, false, size);
2693 }
2694
2695 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2696 {
2697 return _pci_add_cap_save_buffer(dev, cap, true, size);
2698 }
2699
2700 /**
2701 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2702 * @dev: the PCI device
2703 */
2704 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2705 {
2706 int error;
2707
2708 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2709 PCI_EXP_SAVE_REGS * sizeof(u16));
2710 if (error)
2711 dev_err(&dev->dev,
2712 "unable to preallocate PCI Express save buffer\n");
2713
2714 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2715 if (error)
2716 dev_err(&dev->dev,
2717 "unable to preallocate PCI-X save buffer\n");
2718
2719 pci_allocate_vc_save_buffers(dev);
2720 }
2721
2722 void pci_free_cap_save_buffers(struct pci_dev *dev)
2723 {
2724 struct pci_cap_saved_state *tmp;
2725 struct hlist_node *n;
2726
2727 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2728 kfree(tmp);
2729 }
2730
2731 /**
2732 * pci_configure_ari - enable or disable ARI forwarding
2733 * @dev: the PCI device
2734 *
2735 * If @dev and its upstream bridge both support ARI, enable ARI in the
2736 * bridge. Otherwise, disable ARI in the bridge.
2737 */
2738 void pci_configure_ari(struct pci_dev *dev)
2739 {
2740 u32 cap;
2741 struct pci_dev *bridge;
2742
2743 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2744 return;
2745
2746 bridge = dev->bus->self;
2747 if (!bridge)
2748 return;
2749
2750 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2751 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2752 return;
2753
2754 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2755 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2756 PCI_EXP_DEVCTL2_ARI);
2757 bridge->ari_enabled = 1;
2758 } else {
2759 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2760 PCI_EXP_DEVCTL2_ARI);
2761 bridge->ari_enabled = 0;
2762 }
2763 }
2764
2765 static int pci_acs_enable;
2766
2767 /**
2768 * pci_request_acs - ask for ACS to be enabled if supported
2769 */
2770 void pci_request_acs(void)
2771 {
2772 pci_acs_enable = 1;
2773 }
2774
2775 /**
2776 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2777 * @dev: the PCI device
2778 */
2779 static void pci_std_enable_acs(struct pci_dev *dev)
2780 {
2781 int pos;
2782 u16 cap;
2783 u16 ctrl;
2784
2785 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2786 if (!pos)
2787 return;
2788
2789 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2790 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2791
2792 /* Source Validation */
2793 ctrl |= (cap & PCI_ACS_SV);
2794
2795 /* P2P Request Redirect */
2796 ctrl |= (cap & PCI_ACS_RR);
2797
2798 /* P2P Completion Redirect */
2799 ctrl |= (cap & PCI_ACS_CR);
2800
2801 /* Upstream Forwarding */
2802 ctrl |= (cap & PCI_ACS_UF);
2803
2804 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2805 }
2806
2807 /**
2808 * pci_enable_acs - enable ACS if the hardware supports it
2809 * @dev: the PCI device
2810 */
2811 void pci_enable_acs(struct pci_dev *dev)
2812 {
2813 if (!pci_acs_enable)
2814 return;
2815
2816 if (!pci_dev_specific_enable_acs(dev))
2817 return;
2818
2819 pci_std_enable_acs(dev);
2820 }
2821
2822 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2823 {
2824 int pos;
2825 u16 cap, ctrl;
2826
2827 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2828 if (!pos)
2829 return false;
2830
2831 /*
2832 * Except for egress control, capabilities are either required
2833 * or only required if controllable. Features missing from the
2834 * capability field can therefore be assumed as hard-wired enabled.
2835 */
2836 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2837 acs_flags &= (cap | PCI_ACS_EC);
2838
2839 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2840 return (ctrl & acs_flags) == acs_flags;
2841 }
2842
2843 /**
2844 * pci_acs_enabled - test ACS against required flags for a given device
2845 * @pdev: device to test
2846 * @acs_flags: required PCI ACS flags
2847 *
2848 * Return true if the device supports the provided flags. Automatically
2849 * filters out flags that are not implemented on multifunction devices.
2850 *
2851 * Note that this interface checks the effective ACS capabilities of the
2852 * device rather than the actual capabilities. For instance, most single
2853 * function endpoints are not required to support ACS because they have no
2854 * opportunity for peer-to-peer access. We therefore return 'true'
2855 * regardless of whether the device exposes an ACS capability. This makes
2856 * it much easier for callers of this function to ignore the actual type
2857 * or topology of the device when testing ACS support.
2858 */
2859 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2860 {
2861 int ret;
2862
2863 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2864 if (ret >= 0)
2865 return ret > 0;
2866
2867 /*
2868 * Conventional PCI and PCI-X devices never support ACS, either
2869 * effectively or actually. The shared bus topology implies that
2870 * any device on the bus can receive or snoop DMA.
2871 */
2872 if (!pci_is_pcie(pdev))
2873 return false;
2874
2875 switch (pci_pcie_type(pdev)) {
2876 /*
2877 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2878 * but since their primary interface is PCI/X, we conservatively
2879 * handle them as we would a non-PCIe device.
2880 */
2881 case PCI_EXP_TYPE_PCIE_BRIDGE:
2882 /*
2883 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2884 * applicable... must never implement an ACS Extended Capability...".
2885 * This seems arbitrary, but we take a conservative interpretation
2886 * of this statement.
2887 */
2888 case PCI_EXP_TYPE_PCI_BRIDGE:
2889 case PCI_EXP_TYPE_RC_EC:
2890 return false;
2891 /*
2892 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2893 * implement ACS in order to indicate their peer-to-peer capabilities,
2894 * regardless of whether they are single- or multi-function devices.
2895 */
2896 case PCI_EXP_TYPE_DOWNSTREAM:
2897 case PCI_EXP_TYPE_ROOT_PORT:
2898 return pci_acs_flags_enabled(pdev, acs_flags);
2899 /*
2900 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2901 * implemented by the remaining PCIe types to indicate peer-to-peer
2902 * capabilities, but only when they are part of a multifunction
2903 * device. The footnote for section 6.12 indicates the specific
2904 * PCIe types included here.
2905 */
2906 case PCI_EXP_TYPE_ENDPOINT:
2907 case PCI_EXP_TYPE_UPSTREAM:
2908 case PCI_EXP_TYPE_LEG_END:
2909 case PCI_EXP_TYPE_RC_END:
2910 if (!pdev->multifunction)
2911 break;
2912
2913 return pci_acs_flags_enabled(pdev, acs_flags);
2914 }
2915
2916 /*
2917 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2918 * to single function devices with the exception of downstream ports.
2919 */
2920 return true;
2921 }
2922
2923 /**
2924 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2925 * @start: starting downstream device
2926 * @end: ending upstream device or NULL to search to the root bus
2927 * @acs_flags: required flags
2928 *
2929 * Walk up a device tree from start to end testing PCI ACS support. If
2930 * any step along the way does not support the required flags, return false.
2931 */
2932 bool pci_acs_path_enabled(struct pci_dev *start,
2933 struct pci_dev *end, u16 acs_flags)
2934 {
2935 struct pci_dev *pdev, *parent = start;
2936
2937 do {
2938 pdev = parent;
2939
2940 if (!pci_acs_enabled(pdev, acs_flags))
2941 return false;
2942
2943 if (pci_is_root_bus(pdev->bus))
2944 return (end == NULL);
2945
2946 parent = pdev->bus->self;
2947 } while (pdev != end);
2948
2949 return true;
2950 }
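
/*
 * Usage sketch (illustrative): IOMMU grouping code can test whether
 * peer-to-peer DMA is blocked along the entire path from an endpoint to
 * the root bus, e.g.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (pci_acs_path_enabled(pdev, NULL, flags))
 *		(pdev is fully isolated from its peers)
 */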
2951
2952 /**
2953 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2954 * @dev: the PCI device
2955 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2956 *
2957 * Perform INTx swizzling for a device behind one level of bridge. This is
2958 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2959 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2960 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2961 * the PCI Express Base Specification, Revision 2.1)
2962 */
2963 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2964 {
2965 int slot;
2966
2967 if (pci_ari_enabled(dev->bus))
2968 slot = 0;
2969 else
2970 slot = PCI_SLOT(dev->devfn);
2971
2972 return (((pin - 1) + slot) % 4) + 1;
2973 }
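
/*
 * Worked example: a device in slot 2 asserting INTB (pin 2) appears at
 * the upstream bridge as (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD.
 */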
2974
2975 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2976 {
2977 u8 pin;
2978
2979 pin = dev->pin;
2980 if (!pin)
2981 return -1;
2982
2983 while (!pci_is_root_bus(dev->bus)) {
2984 pin = pci_swizzle_interrupt_pin(dev, pin);
2985 dev = dev->bus->self;
2986 }
2987 *bridge = dev;
2988 return pin;
2989 }
2990
2991 /**
2992 * pci_common_swizzle - swizzle INTx all the way to root bridge
2993 * @dev: the PCI device
2994 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2995 *
2996 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2997 * bridges all the way up to a PCI root bus.
2998 */
2999 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3000 {
3001 u8 pin = *pinp;
3002
3003 while (!pci_is_root_bus(dev->bus)) {
3004 pin = pci_swizzle_interrupt_pin(dev, pin);
3005 dev = dev->bus->self;
3006 }
3007 *pinp = pin;
3008 return PCI_SLOT(dev->devfn);
3009 }
3010 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3011
3012 /**
3013 * pci_release_region - Release a PCI BAR
3014 * @pdev: PCI device whose resources were previously reserved by pci_request_region
3015 * @bar: BAR to release
3016 *
3017 * Releases the PCI I/O and memory resources previously reserved by a
3018 * successful call to pci_request_region. Call this function only
3019 * after all use of the PCI regions has ceased.
3020 */
3021 void pci_release_region(struct pci_dev *pdev, int bar)
3022 {
3023 struct pci_devres *dr;
3024
3025 if (pci_resource_len(pdev, bar) == 0)
3026 return;
3027 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3028 release_region(pci_resource_start(pdev, bar),
3029 pci_resource_len(pdev, bar));
3030 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3031 release_mem_region(pci_resource_start(pdev, bar),
3032 pci_resource_len(pdev, bar));
3033
3034 dr = find_pci_dr(pdev);
3035 if (dr)
3036 dr->region_mask &= ~(1 << bar);
3037 }
3038 EXPORT_SYMBOL(pci_release_region);
3039
3040 /**
3041 * __pci_request_region - Reserve PCI I/O and memory resource
3042 * @pdev: PCI device whose resources are to be reserved
3043 * @bar: BAR to be reserved
3044 * @res_name: Name to be associated with resource.
3045 * @exclusive: whether the region access is exclusive or not
3046 *
3047 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3048 * being reserved by owner @res_name. Do not access any
3049 * address inside the PCI regions unless this call returns
3050 * successfully.
3051 *
3052 * If @exclusive is set, then the region is marked so that userspace
3053 * is explicitly not allowed to map the resource via /dev/mem or
3054 * sysfs MMIO access.
3055 *
3056 * Returns 0 on success, or %EBUSY on error. A warning
3057 * message is also printed on failure.
3058 */
3059 static int __pci_request_region(struct pci_dev *pdev, int bar,
3060 const char *res_name, int exclusive)
3061 {
3062 struct pci_devres *dr;
3063
3064 if (pci_resource_len(pdev, bar) == 0)
3065 return 0;
3066
3067 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3068 if (!request_region(pci_resource_start(pdev, bar),
3069 pci_resource_len(pdev, bar), res_name))
3070 goto err_out;
3071 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3072 if (!__request_mem_region(pci_resource_start(pdev, bar),
3073 pci_resource_len(pdev, bar), res_name,
3074 exclusive))
3075 goto err_out;
3076 }
3077
3078 dr = find_pci_dr(pdev);
3079 if (dr)
3080 dr->region_mask |= 1 << bar;
3081
3082 return 0;
3083
3084 err_out:
3085 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
3086 &pdev->resource[bar]);
3087 return -EBUSY;
3088 }
3089
3090 /**
3091 * pci_request_region - Reserve PCI I/O and memory resource
3092 * @pdev: PCI device whose resources are to be reserved
3093 * @bar: BAR to be reserved
3094 * @res_name: Name to be associated with resource
3095 *
3096 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3097 * being reserved by owner @res_name. Do not access any
3098 * address inside the PCI regions unless this call returns
3099 * successfully.
3100 *
3101 * Returns 0 on success, or %EBUSY on error. A warning
3102 * message is also printed on failure.
3103 */
3104 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3105 {
3106 return __pci_request_region(pdev, bar, res_name, 0);
3107 }
3108 EXPORT_SYMBOL(pci_request_region);
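
/*
 * Usage sketch (illustrative; the BAR number and owner name are
 * assumptions): a driver reserves a BAR before mapping it, e.g.
 *
 *	if (pci_request_region(pdev, 0, "my_driver"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 */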
3109
3110 /**
3111 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
3112 * @pdev: PCI device whose resources are to be reserved
3113 * @bar: BAR to be reserved
3114 * @res_name: Name to be associated with resource.
3115 *
3116 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3117 * being reserved by owner @res_name. Do not access any
3118 * address inside the PCI regions unless this call returns
3119 * successfully.
3120 *
3121 * Returns 0 on success, or %EBUSY on error. A warning
3122 * message is also printed on failure.
3123 *
3124 * The key difference that _exclusive makes is that userspace is
3125 * explicitly not allowed to map the resource via /dev/mem or
3126 * sysfs.
3127 */
3128 int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3129 const char *res_name)
3130 {
3131 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3132 }
3133 EXPORT_SYMBOL(pci_request_region_exclusive);
3134
3135 /**
3136 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3137 * @pdev: PCI device whose resources were previously reserved
3138 * @bars: Bitmask of BARs to be released
3139 *
3140 * Release selected PCI I/O and memory resources previously reserved.
3141 * Call this function only after all use of the PCI regions has ceased.
3142 */
3143 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3144 {
3145 int i;
3146
3147 for (i = 0; i < 6; i++)
3148 if (bars & (1 << i))
3149 pci_release_region(pdev, i);
3150 }
3151 EXPORT_SYMBOL(pci_release_selected_regions);
3152
3153 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3154 const char *res_name, int excl)
3155 {
3156 int i;
3157
3158 for (i = 0; i < 6; i++)
3159 if (bars & (1 << i))
3160 if (__pci_request_region(pdev, i, res_name, excl))
3161 goto err_out;
3162 return 0;
3163
3164 err_out:
3165 while (--i >= 0)
3166 if (bars & (1 << i))
3167 pci_release_region(pdev, i);
3168
3169 return -EBUSY;
3170 }
3171
3172
3173 /**
3174 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3175 * @pdev: PCI device whose resources are to be reserved
3176 * @bars: Bitmask of BARs to be requested
3177 * @res_name: Name to be associated with resource
3178 */
3179 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3180 const char *res_name)
3181 {
3182 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3183 }
3184 EXPORT_SYMBOL(pci_request_selected_regions);
3185
3186 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3187 const char *res_name)
3188 {
3189 return __pci_request_selected_regions(pdev, bars, res_name,
3190 IORESOURCE_EXCLUSIVE);
3191 }
3192 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3193
3194 /**
3195 * pci_release_regions - Release reserved PCI I/O and memory resources
3196 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
3197 *
3198 * Releases all PCI I/O and memory resources previously reserved by a
3199 * successful call to pci_request_regions. Call this function only
3200 * after all use of the PCI regions has ceased.
3201 */
3202
3203 void pci_release_regions(struct pci_dev *pdev)
3204 {
3205 pci_release_selected_regions(pdev, (1 << 6) - 1);
3206 }
3207 EXPORT_SYMBOL(pci_release_regions);
3208
3209 /**
3210 * pci_request_regions - Reserve PCI I/O and memory resources
3211 * @pdev: PCI device whose resources are to be reserved
3212 * @res_name: Name to be associated with resource.
3213 *
3214 * Mark all PCI regions associated with PCI device @pdev as
3215 * being reserved by owner @res_name. Do not access any
3216 * address inside the PCI regions unless this call returns
3217 * successfully.
3218 *
3219 * Returns 0 on success, or %EBUSY on error. A warning
3220 * message is also printed on failure.
3221 */
3222 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3223 {
3224 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3225 }
3226 EXPORT_SYMBOL(pci_request_regions);
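
/*
 * Usage sketch (illustrative; "DRV_NAME" is an assumed macro and error
 * paths are abbreviated): a typical probe sequence pairs this with
 * pci_release_regions() on teardown, e.g.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, DRV_NAME);
 *	if (err)
 *		goto err_disable;
 */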
3227
3228 /**
3229 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3230 * @pdev: PCI device whose resources are to be reserved
3231 * @res_name: Name to be associated with resource.
3232 *
3233 * Mark all PCI regions associated with PCI device @pdev as
3234 * being reserved by owner @res_name. Do not access any
3235 * address inside the PCI regions unless this call returns
3236 * successfully.
3237 *
3238 * pci_request_regions_exclusive() will mark the region so that
3239 * /dev/mem and the sysfs MMIO access will not be allowed.
3240 *
3241 * Returns 0 on success, or %EBUSY on error. A warning
3242 * message is also printed on failure.
3243 */
3244 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3245 {
3246 return pci_request_selected_regions_exclusive(pdev,
3247 ((1 << 6) - 1), res_name);
3248 }
3249 EXPORT_SYMBOL(pci_request_regions_exclusive);
3250
3251 #ifdef PCI_IOBASE
3252 struct io_range {
3253 struct list_head list;
3254 phys_addr_t start;
3255 resource_size_t size;
3256 };
3257
3258 static LIST_HEAD(io_range_list);
3259 static DEFINE_SPINLOCK(io_range_lock);
3260 #endif
3261
3262 /*
3263 * Record the PCI IO range (expressed as CPU physical address + size).
3264 * Return a negative value if an error has occurred, zero otherwise
3265 */
3266 int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
3267 {
3268 int err = 0;
3269
3270 #ifdef PCI_IOBASE
3271 struct io_range *range;
3272 resource_size_t allocated_size = 0;
3273
3274 /* check if the range hasn't been previously recorded */
3275 spin_lock(&io_range_lock);
3276 list_for_each_entry(range, &io_range_list, list) {
3277 if (addr >= range->start && addr + size <= range->start + range->size) {
3278 /* range already registered, bail out */
3279 goto end_register;
3280 }
3281 allocated_size += range->size;
3282 }
3283
3284 /* range not registered yet, check for available space */
3285 if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
3286 /* if it's too big check if 64K space can be reserved */
3287 if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
3288 err = -E2BIG;
3289 goto end_register;
3290 }
3291
3292 size = SZ_64K;
3293 pr_warn("Requested IO range too big, new size set to 64K\n");
3294 }
3295
3296 /* add the range to the list */
3297 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3298 if (!range) {
3299 err = -ENOMEM;
3300 goto end_register;
3301 }
3302
3303 range->start = addr;
3304 range->size = size;
3305
3306 list_add_tail(&range->list, &io_range_list);
3307
3308 end_register:
3309 spin_unlock(&io_range_lock);
3310 #endif
3311
3312 return err;
3313 }
3314
3315 phys_addr_t pci_pio_to_address(unsigned long pio)
3316 {
3317 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3318
3319 #ifdef PCI_IOBASE
3320 struct io_range *range;
3321 resource_size_t allocated_size = 0;
3322
3323 if (pio > IO_SPACE_LIMIT)
3324 return address;
3325
3326 spin_lock(&io_range_lock);
3327 list_for_each_entry(range, &io_range_list, list) {
3328 if (pio >= allocated_size && pio < allocated_size + range->size) {
3329 address = range->start + pio - allocated_size;
3330 break;
3331 }
3332 allocated_size += range->size;
3333 }
3334 spin_unlock(&io_range_lock);
3335 #endif
3336
3337 return address;
3338 }
3339
3340 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3341 {
3342 #ifdef PCI_IOBASE
3343 struct io_range *res;
3344 resource_size_t offset = 0;
3345 unsigned long addr = -1;
3346
3347 spin_lock(&io_range_lock);
3348 list_for_each_entry(res, &io_range_list, list) {
3349 if (address >= res->start && address < res->start + res->size) {
3350 addr = address - res->start + offset;
3351 break;
3352 }
3353 offset += res->size;
3354 }
3355 spin_unlock(&io_range_lock);
3356
3357 return addr;
3358 #else
3359 if (address > IO_SPACE_LIMIT)
3360 return (unsigned long)-1;
3361
3362 return (unsigned long) address;
3363 #endif
3364 }
3365
3366 /**
3367 * pci_remap_iospace - Remap the memory mapped I/O space
3368 * @res: Resource describing the I/O space
3369 * @phys_addr: physical address of range to be mapped
3370 *
3371 * Remap the memory mapped I/O space described by the @res
3372 * and the CPU physical address @phys_addr into virtual address space.
3373 * Only architectures that have memory mapped IO functions defined
3374 * (and the PCI_IOBASE value defined) should call this function.
3375 */
3376 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3377 {
3378 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3379 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3380
3381 if (!(res->flags & IORESOURCE_IO))
3382 return -EINVAL;
3383
3384 if (res->end > IO_SPACE_LIMIT)
3385 return -EINVAL;
3386
3387 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3388 pgprot_device(PAGE_KERNEL));
3389 #else
3390 /* this architecture does not have memory mapped I/O space,
3391 so this function should never be called */
3392 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3393 return -ENODEV;
3394 #endif
3395 }
3396 EXPORT_SYMBOL(pci_remap_iospace);
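
/*
 * Usage sketch (illustrative; "res" is assumed to describe an
 * IORESOURCE_IO range already recorded with pci_register_io_range()):
 * host bridge drivers call this while parsing their ranges, e.g.
 *
 *	err = pci_remap_iospace(res, phys_addr);
 *	if (err)
 *		dev_warn(dev, "error %d mapping IO %pR\n", err, res);
 */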
3397
3398 /**
3399 * pci_unmap_iospace - Unmap the memory mapped I/O space
3400 * @res: resource to be unmapped
3401 *
3402 * Unmap the CPU virtual address @res from virtual address space.
3403 * Only architectures that have memory mapped IO functions defined
3404 * (and the PCI_IOBASE value defined) should call this function.
3405 */
3406 void pci_unmap_iospace(struct resource *res)
3407 {
3408 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3409 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3410
3411 unmap_kernel_range(vaddr, resource_size(res));
3412 #endif
3413 }
3414 EXPORT_SYMBOL(pci_unmap_iospace);
3415
3416 /**
3417 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
3418 * @dev: Generic device to remap IO address for
3419 * @offset: Resource address to map
3420 * @size: Size of map
3421 *
3422 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
3423 * detach.
3424 */
3425 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3426 resource_size_t offset,
3427 resource_size_t size)
3428 {
3429 void __iomem **ptr, *addr;
3430
3431 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3432 if (!ptr)
3433 return NULL;
3434
3435 addr = pci_remap_cfgspace(offset, size);
3436 if (addr) {
3437 *ptr = addr;
3438 devres_add(dev, ptr);
3439 } else
3440 devres_free(ptr);
3441
3442 return addr;
3443 }
3444 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3445
3446 /**
3447 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
3448 * @dev: generic device to handle the resource for
3449 * @res: configuration space resource to be handled
3450 *
3451 * Checks that a resource is a valid memory region, requests the memory
3452 * region and ioremaps with pci_remap_cfgspace() API that ensures the
3453 * proper PCI configuration space memory attributes are guaranteed.
3454 *
3455 * All operations are managed and will be undone on driver detach.
3456 *
3457 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
3458 * on failure. Usage example:
3459 *
3460 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3461 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
3462 * if (IS_ERR(base))
3463 * return PTR_ERR(base);
3464 */
3465 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3466 struct resource *res)
3467 {
3468 resource_size_t size;
3469 const char *name;
3470 void __iomem *dest_ptr;
3471
3472 BUG_ON(!dev);
3473
3474 if (!res || resource_type(res) != IORESOURCE_MEM) {
3475 dev_err(dev, "invalid resource\n");
3476 return IOMEM_ERR_PTR(-EINVAL);
3477 }
3478
3479 size = resource_size(res);
3480 name = res->name ?: dev_name(dev);
3481
3482 if (!devm_request_mem_region(dev, res->start, size, name)) {
3483 dev_err(dev, "can't request region for resource %pR\n", res);
3484 return IOMEM_ERR_PTR(-EBUSY);
3485 }
3486
3487 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3488 if (!dest_ptr) {
3489 dev_err(dev, "ioremap failed for resource %pR\n", res);
3490 devm_release_mem_region(dev, res->start, size);
3491 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3492 }
3493
3494 return dest_ptr;
3495 }
3496 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3497
3498 static void __pci_set_master(struct pci_dev *dev, bool enable)
3499 {
3500 u16 old_cmd, cmd;
3501
3502 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3503 if (enable)
3504 cmd = old_cmd | PCI_COMMAND_MASTER;
3505 else
3506 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3507 if (cmd != old_cmd) {
3508 dev_dbg(&dev->dev, "%s bus mastering\n",
3509 enable ? "enabling" : "disabling");
3510 pci_write_config_word(dev, PCI_COMMAND, cmd);
3511 }
3512 dev->is_busmaster = enable;
3513 }
3514
3515 /**
3516 * pcibios_setup - process "pci=" kernel boot arguments
3517 * @str: string used to pass in "pci=" kernel boot arguments
3518 *
3519 * Process kernel boot arguments. This is the default implementation.
3520 * Architecture specific implementations can override this as necessary.
3521 */
3522 char * __weak __init pcibios_setup(char *str)
3523 {
3524 return str;
3525 }
3526
3527 /**
3528 * pcibios_set_master - enable PCI bus-mastering for device dev
3529 * @dev: the PCI device to enable
3530 *
3531 * Enables PCI bus-mastering for the device. This is the default
3532 * implementation. Architecture specific implementations can override
3533 * this if necessary.
3534 */
3535 void __weak pcibios_set_master(struct pci_dev *dev)
3536 {
3537 u8 lat;
3538
3539 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3540 if (pci_is_pcie(dev))
3541 return;
3542
3543 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3544 if (lat < 16)
3545 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3546 else if (lat > pcibios_max_latency)
3547 lat = pcibios_max_latency;
3548 else
3549 return;
3550
3551 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3552 }
3553
3554 /**
3555 * pci_set_master - enables bus-mastering for device dev
3556 * @dev: the PCI device to enable
3557 *
3558 * Enables bus-mastering on the device and calls pcibios_set_master()
3559 * to do the needed arch specific settings.
3560 */
3561 void pci_set_master(struct pci_dev *dev)
3562 {
3563 __pci_set_master(dev, true);
3564 pcibios_set_master(dev);
3565 }
3566 EXPORT_SYMBOL(pci_set_master);
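
/*
 * Usage sketch (illustrative): DMA-capable drivers enable bus mastering
 * right after enabling the device in probe, e.g.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */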
3567
3568 /**
3569 * pci_clear_master - disables bus-mastering for device dev
3570 * @dev: the PCI device to disable
3571 */
3572 void pci_clear_master(struct pci_dev *dev)
3573 {
3574 __pci_set_master(dev, false);
3575 }
3576 EXPORT_SYMBOL(pci_clear_master);
3577
3578 /**
3579 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3580 * @dev: the PCI device for which MWI is to be enabled
3581 *
3582 * Helper function for pci_set_mwi.
3583 * Originally copied from drivers/net/acenic.c.
3584 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3585 *
3586 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3587 */
3588 int pci_set_cacheline_size(struct pci_dev *dev)
3589 {
3590 u8 cacheline_size;
3591
3592 if (!pci_cache_line_size)
3593 return -EINVAL;
3594
3595 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3596 equal to or a multiple of the right value. */
3597 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3598 if (cacheline_size >= pci_cache_line_size &&
3599 (cacheline_size % pci_cache_line_size) == 0)
3600 return 0;
3601
3602 /* Write the correct value. */
3603 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3604 /* Read it back. */
3605 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3606 if (cacheline_size == pci_cache_line_size)
3607 return 0;
3608
3609 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3610 pci_cache_line_size << 2);
3611
3612 return -EINVAL;
3613 }
3614 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3615
3616 /**
3617 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3618 * @dev: the PCI device for which MWI is enabled
3619 *
3620 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3621 *
3622 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3623 */
3624 int pci_set_mwi(struct pci_dev *dev)
3625 {
3626 #ifdef PCI_DISABLE_MWI
3627 return 0;
3628 #else
3629 int rc;
3630 u16 cmd;
3631
3632 rc = pci_set_cacheline_size(dev);
3633 if (rc)
3634 return rc;
3635
3636 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3637 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3638 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3639 cmd |= PCI_COMMAND_INVALIDATE;
3640 pci_write_config_word(dev, PCI_COMMAND, cmd);
3641 }
3642 return 0;
3643 #endif
3644 }
3645 EXPORT_SYMBOL(pci_set_mwi);
3646
3647 /**
3648 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3649 * @dev: the PCI device for which MWI is enabled
3650 *
3651 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3652 * Callers are not required to check the return value.
3653 *
3654 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3655 */
3656 int pci_try_set_mwi(struct pci_dev *dev)
3657 {
3658 #ifdef PCI_DISABLE_MWI
3659 return 0;
3660 #else
3661 return pci_set_mwi(dev);
3662 #endif
3663 }
3664 EXPORT_SYMBOL(pci_try_set_mwi);
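
/*
 * Usage sketch (illustrative): since MWI is only a performance hint,
 * most callers use the _try_ variant and ignore the result, e.g.
 *
 *	pci_try_set_mwi(pdev);
 */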
3665
3666 /**
3667 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3668 * @dev: the PCI device to disable
3669 *
3670 * Disables PCI Memory-Write-Invalidate transaction on the device
3671 */
3672 void pci_clear_mwi(struct pci_dev *dev)
3673 {
3674 #ifndef PCI_DISABLE_MWI
3675 u16 cmd;
3676
3677 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3678 if (cmd & PCI_COMMAND_INVALIDATE) {
3679 cmd &= ~PCI_COMMAND_INVALIDATE;
3680 pci_write_config_word(dev, PCI_COMMAND, cmd);
3681 }
3682 #endif
3683 }
3684 EXPORT_SYMBOL(pci_clear_mwi);
3685
3686 /**
3687 * pci_intx - enables/disables PCI INTx for device dev
3688 * @pdev: the PCI device to operate on
3689 * @enable: boolean: whether to enable or disable PCI INTx
3690 *
3691 * Enables/disables PCI INTx for device dev
3692 */
3693 void pci_intx(struct pci_dev *pdev, int enable)
3694 {
3695 u16 pci_command, new;
3696
3697 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3698
3699 if (enable)
3700 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3701 else
3702 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3703
3704 if (new != pci_command) {
3705 struct pci_devres *dr;
3706
3707 pci_write_config_word(pdev, PCI_COMMAND, new);
3708
3709 dr = find_pci_dr(pdev);
3710 if (dr && !dr->restore_intx) {
3711 dr->restore_intx = 1;
3712 dr->orig_intx = !enable;
3713 }
3714 }
3715 }
3716 EXPORT_SYMBOL_GPL(pci_intx);
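
/*
 * Usage sketch (illustrative): code that switches a device over to MSI
 * typically disables legacy INTx assertion first, e.g.
 *
 *	pci_intx(pdev, 0);
 */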
3717
3718 /**
3719 * pci_intx_mask_supported - probe for INTx masking support
3720 * @dev: the PCI device to operate on
3721 *
3722 * Check if the device dev supports INTx masking via the config space
3723 * command word.
3724 */
3725 bool pci_intx_mask_supported(struct pci_dev *dev)
3726 {
3727 bool mask_supported = false;
3728 u16 orig, new;
3729
3730 if (dev->broken_intx_masking)
3731 return false;
3732
3733 pci_cfg_access_lock(dev);
3734
3735 pci_read_config_word(dev, PCI_COMMAND, &orig);
3736 pci_write_config_word(dev, PCI_COMMAND,
3737 orig ^ PCI_COMMAND_INTX_DISABLE);
3738 pci_read_config_word(dev, PCI_COMMAND, &new);
3739
3740 /*
3741 * There's no way to protect against hardware bugs or detect them
3742 * reliably, but as long as we know what the value should be, let's
3743 * go ahead and check it.
3744 */
3745 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3746 dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3747 orig, new);
3748 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3749 mask_supported = true;
3750 pci_write_config_word(dev, PCI_COMMAND, orig);
3751 }
3752
3753 pci_cfg_access_unlock(dev);
3754 return mask_supported;
3755 }
3756 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3757
3758 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3759 {
3760 struct pci_bus *bus = dev->bus;
3761 bool mask_updated = true;
3762 u32 cmd_status_dword;
3763 u16 origcmd, newcmd;
3764 unsigned long flags;
3765 bool irq_pending;
3766
3767 /*
3768 * We do a single dword read to retrieve both command and status.
3769 * Document assumptions that make this possible.
3770 */
3771 BUILD_BUG_ON(PCI_COMMAND % 4);
3772 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3773
3774 raw_spin_lock_irqsave(&pci_lock, flags);
3775
3776 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3777
3778 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3779
3780 /*
3781 * Check interrupt status register to see whether our device
3782 * triggered the interrupt (when masking) or the next IRQ is
3783 * already pending (when unmasking).
3784 */
3785 if (mask != irq_pending) {
3786 mask_updated = false;
3787 goto done;
3788 }
3789
3790 origcmd = cmd_status_dword;
3791 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3792 if (mask)
3793 newcmd |= PCI_COMMAND_INTX_DISABLE;
3794 if (newcmd != origcmd)
3795 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3796
3797 done:
3798 raw_spin_unlock_irqrestore(&pci_lock, flags);
3799
3800 return mask_updated;
3801 }
3802
3803 /**
3804 * pci_check_and_mask_intx - mask INTx on pending interrupt
3805 * @dev: the PCI device to operate on
3806 *
3807 * Check if the device dev has its INTx line asserted, mask it and
3808 * return true in that case. False is returned if no interrupt was
3809 * pending.
3810 */
3811 bool pci_check_and_mask_intx(struct pci_dev *dev)
3812 {
3813 return pci_check_and_set_intx_mask(dev, true);
3814 }
3815 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3816
3817 /**
3818 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3819 * @dev: the PCI device to operate on
3820 *
3821 * Check if the device dev has its INTx line asserted, unmask it if not
3822 * and return true. False is returned and the mask remains active if
3823 * there was still an interrupt pending.
3824 */
3825 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3826 {
3827 return pci_check_and_set_intx_mask(dev, false);
3828 }
3829 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
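
/*
 * Example (hypothetical sketch): pci_check_and_mask_intx() is meant for
 * handlers on shared INTx lines. The handler masks the interrupt at the
 * device only when the device actually asserted it; a later thread would
 * re-enable it with pci_check_and_unmask_intx(). "example_irq_handler"
 * is an illustrative name, not a kernel API.
 */
static irqreturn_t __maybe_unused example_irq_handler(int irq, void *arg)
{
	struct pci_dev *pdev = arg;

	/* Not our device, or INTx already masked: let other handlers run. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;

	/* Device is quiesced; defer the real work to a threaded handler. */
	return IRQ_WAKE_THREAD;
}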
3830
3831 /**
3832 * pci_wait_for_pending_transaction - waits for pending transaction
3833 * @dev: the PCI device to operate on
3834 *
3835 * Return 0 if a transaction is pending, 1 otherwise.
3836 */
3837 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3838 {
3839 if (!pci_is_pcie(dev))
3840 return 1;
3841
3842 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3843 PCI_EXP_DEVSTA_TRPND);
3844 }
3845 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3846
3847 /*
3848 * We should only need to wait 100ms after FLR, but some devices take longer.
3849 * Wait for up to 1000ms for config space to return something other than -1.
3850 * Intel IGD requires this when an LCD panel is attached. We read the 2nd
3851 * dword because VFs don't implement the 1st dword.
3852 */
3853 static void pci_flr_wait(struct pci_dev *dev)
3854 {
3855 int i = 0;
3856 u32 id;
3857
3858 do {
3859 msleep(100);
3860 pci_read_config_dword(dev, PCI_COMMAND, &id);
3861 } while (i++ < 10 && id == ~0);
3862
3863 if (id == ~0)
3864 dev_warn(&dev->dev, "Failed to return from FLR\n");
3865 else if (i > 1)
3866 dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
3867 (i - 1) * 100);
3868 }
3869
3870 /**
3871 * pcie_has_flr - check if a device supports function level resets
3872 * @dev: device to check
3873 *
3874 * Returns true if the device advertises support for PCIe function level
3875 * resets.
3876 */
3877 static bool pcie_has_flr(struct pci_dev *dev)
3878 {
3879 u32 cap;
3880
3881 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3882 return false;
3883
3884 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3885 return cap & PCI_EXP_DEVCAP_FLR;
3886 }
3887
3888 /**
3889 * pcie_flr - initiate a PCIe function level reset
3890 * @dev: device to reset
3891 *
3892 * Initiate a function level reset on @dev. The caller should ensure the
3893 * device supports FLR before calling this function, e.g. by using the
3894 * pcie_has_flr() helper.
3895 */
3896 void pcie_flr(struct pci_dev *dev)
3897 {
3898 if (!pci_wait_for_pending_transaction(dev))
3899 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3900
3901 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3902 pci_flr_wait(dev);
3903 }
3904 EXPORT_SYMBOL_GPL(pcie_flr);
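
/*
 * Example (hypothetical sketch): as the comment above says, callers are
 * expected to pair pcie_has_flr() with pcie_flr(), since pcie_flr()
 * itself does not check for FLR support.
 */
static int __maybe_unused example_do_flr(struct pci_dev *pdev)
{
	if (!pcie_has_flr(pdev))
		return -ENOTTY;

	pcie_flr(pdev);
	return 0;
}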
3905
3906 static int pci_af_flr(struct pci_dev *dev, int probe)
3907 {
3908 int pos;
3909 u8 cap;
3910
3911 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3912 if (!pos)
3913 return -ENOTTY;
3914
3915 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3916 return -ENOTTY;
3917
3918 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3919 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3920 return -ENOTTY;
3921
3922 if (probe)
3923 return 0;
3924
3925 /*
3926 * Wait for Transaction Pending bit to clear. A word-aligned test
3927 * is used, so we use the control offset rather than status and shift
3928 * the test bit to match.
3929 */
3930 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3931 PCI_AF_STATUS_TP << 8))
3932 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3933
3934 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3935 pci_flr_wait(dev);
3936 return 0;
3937 }
3938
3939 /**
3940 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3941 * @dev: Device to reset.
3942 * @probe: If set, only check if the device can be reset this way.
3943 *
3944 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3945 * unset, it will be reinitialized internally when going from PCI_D3hot to
3946 * PCI_D0. If that's the case and the device is not in a low-power state
3947 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3948 *
3949 * NOTE: This causes the caller to sleep for twice the device power transition
3950 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3951 * by default (i.e. unless the @dev's d3_delay field has a different value).
3952 * Moreover, only devices in D0 can be reset by this function.
3953 */
3954 static int pci_pm_reset(struct pci_dev *dev, int probe)
3955 {
3956 u16 csr;
3957
3958 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3959 return -ENOTTY;
3960
3961 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3962 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3963 return -ENOTTY;
3964
3965 if (probe)
3966 return 0;
3967
3968 if (dev->current_state != PCI_D0)
3969 return -EINVAL;
3970
3971 csr &= ~PCI_PM_CTRL_STATE_MASK;
3972 csr |= PCI_D3hot;
3973 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3974 pci_dev_d3_sleep(dev);
3975
3976 csr &= ~PCI_PM_CTRL_STATE_MASK;
3977 csr |= PCI_D0;
3978 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3979 pci_dev_d3_sleep(dev);
3980
3981 return 0;
3982 }
3983
3984 void pci_reset_secondary_bus(struct pci_dev *dev)
3985 {
3986 u16 ctrl;
3987
3988 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3989 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3990 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3991 /*
3992 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
3993 * this to 2ms to ensure that we meet the minimum requirement.
3994 */
3995 msleep(2);
3996
3997 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3998 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3999
4000 /*
4001 * Trhfa for conventional PCI is 2^25 clock cycles.
4002 * Assuming a minimum 33MHz clock this results in a 1s
4003 * delay before we can consider subordinate devices to
4004 * be re-initialized. PCIe has some ways to shorten this,
4005 * but we don't make use of them yet.
4006 */
4007 ssleep(1);
4008 }
4009
4010 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4011 {
4012 pci_reset_secondary_bus(dev);
4013 }
4014
4015 /**
4016 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
4017 * @dev: Bridge device
4018 *
4019 * Use the bridge control register to assert reset on the secondary bus.
4020 * Devices on the secondary bus are left in power-on state.
4021 */
4022 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4023 {
4024 pcibios_reset_secondary_bus(dev);
4025 }
4026 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4027
4028 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4029 {
4030 struct pci_dev *pdev;
4031
4032 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4033 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4034 return -ENOTTY;
4035
4036 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4037 if (pdev != dev)
4038 return -ENOTTY;
4039
4040 if (probe)
4041 return 0;
4042
4043 pci_reset_bridge_secondary_bus(dev->bus->self);
4044
4045 return 0;
4046 }
4047
4048 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4049 {
4050 int rc = -ENOTTY;
4051
4052 if (!hotplug || !try_module_get(hotplug->ops->owner))
4053 return rc;
4054
4055 if (hotplug->ops->reset_slot)
4056 rc = hotplug->ops->reset_slot(hotplug, probe);
4057
4058 module_put(hotplug->ops->owner);
4059
4060 return rc;
4061 }
4062
4063 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4064 {
4065 struct pci_dev *pdev;
4066
4067 if (dev->subordinate || !dev->slot ||
4068 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4069 return -ENOTTY;
4070
4071 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4072 if (pdev != dev && pdev->slot == dev->slot)
4073 return -ENOTTY;
4074
4075 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4076 }
4077
4078 static int __pci_dev_reset(struct pci_dev *dev, int probe)
4079 {
4080 int rc;
4081
4082 might_sleep();
4083
4084 rc = pci_dev_specific_reset(dev, probe);
4085 if (rc != -ENOTTY)
4086 goto done;
4087
4088 if (pcie_has_flr(dev)) {
4089 if (!probe)
4090 pcie_flr(dev);
4091 rc = 0;
4092 goto done;
4093 }
4094
4095 rc = pci_af_flr(dev, probe);
4096 if (rc != -ENOTTY)
4097 goto done;
4098
4099 rc = pci_pm_reset(dev, probe);
4100 if (rc != -ENOTTY)
4101 goto done;
4102
4103 rc = pci_dev_reset_slot_function(dev, probe);
4104 if (rc != -ENOTTY)
4105 goto done;
4106
4107 rc = pci_parent_bus_reset(dev, probe);
4108 done:
4109 return rc;
4110 }
4111
4112 static void pci_dev_lock(struct pci_dev *dev)
4113 {
4114 pci_cfg_access_lock(dev);
4115 /* block PM suspend, driver probe, etc. */
4116 device_lock(&dev->dev);
4117 }
4118
4119 /* Return 1 on successful lock, 0 on contention */
4120 static int pci_dev_trylock(struct pci_dev *dev)
4121 {
4122 if (pci_cfg_access_trylock(dev)) {
4123 if (device_trylock(&dev->dev))
4124 return 1;
4125 pci_cfg_access_unlock(dev);
4126 }
4127
4128 return 0;
4129 }
4130
4131 static void pci_dev_unlock(struct pci_dev *dev)
4132 {
4133 device_unlock(&dev->dev);
4134 pci_cfg_access_unlock(dev);
4135 }
4136
4137 /**
4138 * pci_reset_notify - notify device driver of reset
4139 * @dev: device to be notified of reset
4140 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
4141 * completed
4142 *
4143 * Must be called prior to device access being disabled and after device
4144 * access is restored.
4145 */
4146 static void pci_reset_notify(struct pci_dev *dev, bool prepare)
4147 {
4148 const struct pci_error_handlers *err_handler =
4149 dev->driver ? dev->driver->err_handler : NULL;
4150 if (err_handler && err_handler->reset_notify)
4151 err_handler->reset_notify(dev, prepare);
4152 }
4153
4154 static void pci_dev_save_and_disable(struct pci_dev *dev)
4155 {
4156 pci_reset_notify(dev, true);
4157
4158 /*
4159 * Wake-up device prior to save. PM registers default to D0 after
4160 * reset and a simple register restore doesn't reliably return
4161 * to a non-D0 state anyway.
4162 */
4163 pci_set_power_state(dev, PCI_D0);
4164
4165 pci_save_state(dev);
4166 /*
4167 * Disable the device by clearing the Command register, except for
4168 * INTx-disable which is set. This not only disables MMIO and I/O port
4169 * BARs, but also prevents the device from being Bus Master, preventing
4170 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4171 * compliant devices, INTx-disable prevents legacy interrupts.
4172 */
4173 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4174 }
4175
4176 static void pci_dev_restore(struct pci_dev *dev)
4177 {
4178 pci_restore_state(dev);
4179 pci_reset_notify(dev, false);
4180 }
4181
4182 static int pci_dev_reset(struct pci_dev *dev, int probe)
4183 {
4184 int rc;
4185
4186 if (!probe)
4187 pci_dev_lock(dev);
4188
4189 rc = __pci_dev_reset(dev, probe);
4190
4191 if (!probe)
4192 pci_dev_unlock(dev);
4193
4194 return rc;
4195 }
4196
4197 /**
4198 * __pci_reset_function - reset a PCI device function
4199 * @dev: PCI device to reset
4200 *
4201 * Some devices allow an individual function to be reset without affecting
4202 * other functions in the same device. The PCI device must be responsive
4203 * to PCI config space in order to use this function.
4204 *
4205 * The device function is presumed to be unused when this function is called.
4206 * Resetting the device will make the contents of PCI configuration space
4207 * random, so any caller of this must be prepared to reinitialise the
4208 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4209 * etc.
4210 *
4211 * Returns 0 if the device function was successfully reset or negative if the
4212 * device doesn't support resetting a single function.
4213 */
4214 int __pci_reset_function(struct pci_dev *dev)
4215 {
4216 return pci_dev_reset(dev, 0);
4217 }
4218 EXPORT_SYMBOL_GPL(__pci_reset_function);
4219
4220 /**
4221 * __pci_reset_function_locked - reset a PCI device function while holding
4222 * the @dev mutex lock.
4223 * @dev: PCI device to reset
4224 *
4225 * Some devices allow an individual function to be reset without affecting
4226 * other functions in the same device. The PCI device must be responsive
4227 * to PCI config space in order to use this function.
4228 *
4229 * The device function is presumed to be unused and the caller is holding
4230 * the device mutex lock when this function is called.
4231 * Resetting the device will make the contents of PCI configuration space
4232 * random, so any caller of this must be prepared to reinitialise the
4233 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4234 * etc.
4235 *
4236 * Returns 0 if the device function was successfully reset or negative if the
4237 * device doesn't support resetting a single function.
4238 */
4239 int __pci_reset_function_locked(struct pci_dev *dev)
4240 {
4241 return __pci_dev_reset(dev, 0);
4242 }
4243 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4244
4245 /**
4246 * pci_probe_reset_function - check whether the device can be safely reset
4247 * @dev: PCI device to reset
4248 *
4249 * Some devices allow an individual function to be reset without affecting
4250 * other functions in the same device. The PCI device must be responsive
4251 * to PCI config space in order to use this function.
4252 *
4253 * Returns 0 if the device function can be reset or negative if the
4254 * device doesn't support resetting a single function.
4255 */
4256 int pci_probe_reset_function(struct pci_dev *dev)
4257 {
4258 return pci_dev_reset(dev, 1);
4259 }
4260
4261 /**
4262 * pci_reset_function - quiesce and reset a PCI device function
4263 * @dev: PCI device to reset
4264 *
4265 * Some devices allow an individual function to be reset without affecting
4266 * other functions in the same device. The PCI device must be responsive
4267 * to PCI config space in order to use this function.
4268 *
4269 * This function does not just reset the PCI portion of a device, but
4270 * clears all the state associated with the device. This function differs
4271 * from __pci_reset_function in that it saves and restores device state
4272 * over the reset.
4273 *
4274 * Returns 0 if the device function was successfully reset or negative if the
4275 * device doesn't support resetting a single function.
4276 */
4277 int pci_reset_function(struct pci_dev *dev)
4278 {
4279 int rc;
4280
4281 rc = pci_dev_reset(dev, 1);
4282 if (rc)
4283 return rc;
4284
4285 pci_dev_save_and_disable(dev);
4286
4287 rc = pci_dev_reset(dev, 0);
4288
4289 pci_dev_restore(dev);
4290
4291 return rc;
4292 }
4293 EXPORT_SYMBOL_GPL(pci_reset_function);
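
/*
 * Example (hypothetical sketch): a driver recovering a wedged device
 * would quiesce its own activity first, then call pci_reset_function(),
 * which saves and restores config space around whichever reset method
 * __pci_dev_reset() selects. "example_recover_device" is illustrative.
 */
static int __maybe_unused example_recover_device(struct pci_dev *pdev)
{
	int rc;

	/* Caller must ensure the function is otherwise unused/quiesced. */
	rc = pci_reset_function(pdev);
	if (rc)
		dev_err(&pdev->dev, "reset failed: %d\n", rc);

	return rc;
}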
4294
4295 /**
4296 * pci_try_reset_function - quiesce and reset a PCI device function
4297 * @dev: PCI device to reset
4298 *
4299 * Same as above, except return -EAGAIN if unable to lock device.
4300 */
4301 int pci_try_reset_function(struct pci_dev *dev)
4302 {
4303 int rc;
4304
4305 rc = pci_dev_reset(dev, 1);
4306 if (rc)
4307 return rc;
4308
4309 pci_dev_save_and_disable(dev);
4310
4311 if (pci_dev_trylock(dev)) {
4312 rc = __pci_dev_reset(dev, 0);
4313 pci_dev_unlock(dev);
4314 } else
4315 rc = -EAGAIN;
4316
4317 pci_dev_restore(dev);
4318
4319 return rc;
4320 }
4321 EXPORT_SYMBOL_GPL(pci_try_reset_function);
4322
4323 /* Do any devices on or below this bus prevent a bus reset? */
4324 static bool pci_bus_resetable(struct pci_bus *bus)
4325 {
4326 struct pci_dev *dev;
4327
4328 list_for_each_entry(dev, &bus->devices, bus_list) {
4329 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4330 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4331 return false;
4332 }
4333
4334 return true;
4335 }
4336
4337 /* Lock devices from the top of the tree down */
4338 static void pci_bus_lock(struct pci_bus *bus)
4339 {
4340 struct pci_dev *dev;
4341
4342 list_for_each_entry(dev, &bus->devices, bus_list) {
4343 pci_dev_lock(dev);
4344 if (dev->subordinate)
4345 pci_bus_lock(dev->subordinate);
4346 }
4347 }
4348
4349 /* Unlock devices from the bottom of the tree up */
4350 static void pci_bus_unlock(struct pci_bus *bus)
4351 {
4352 struct pci_dev *dev;
4353
4354 list_for_each_entry(dev, &bus->devices, bus_list) {
4355 if (dev->subordinate)
4356 pci_bus_unlock(dev->subordinate);
4357 pci_dev_unlock(dev);
4358 }
4359 }
4360
4361 /* Return 1 on successful lock, 0 on contention */
4362 static int pci_bus_trylock(struct pci_bus *bus)
4363 {
4364 struct pci_dev *dev;
4365
4366 list_for_each_entry(dev, &bus->devices, bus_list) {
4367 if (!pci_dev_trylock(dev))
4368 goto unlock;
4369 if (dev->subordinate) {
4370 if (!pci_bus_trylock(dev->subordinate)) {
4371 pci_dev_unlock(dev);
4372 goto unlock;
4373 }
4374 }
4375 }
4376 return 1;
4377
4378 unlock:
4379 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4380 if (dev->subordinate)
4381 pci_bus_unlock(dev->subordinate);
4382 pci_dev_unlock(dev);
4383 }
4384 return 0;
4385 }
4386
4387 /* Do any devices on or below this slot prevent a bus reset? */
4388 static bool pci_slot_resetable(struct pci_slot *slot)
4389 {
4390 struct pci_dev *dev;
4391
4392 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4393 if (!dev->slot || dev->slot != slot)
4394 continue;
4395 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4396 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4397 return false;
4398 }
4399
4400 return true;
4401 }
4402
4403 /* Lock devices from the top of the tree down */
4404 static void pci_slot_lock(struct pci_slot *slot)
4405 {
4406 struct pci_dev *dev;
4407
4408 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4409 if (!dev->slot || dev->slot != slot)
4410 continue;
4411 pci_dev_lock(dev);
4412 if (dev->subordinate)
4413 pci_bus_lock(dev->subordinate);
4414 }
4415 }
4416
4417 /* Unlock devices from the bottom of the tree up */
4418 static void pci_slot_unlock(struct pci_slot *slot)
4419 {
4420 struct pci_dev *dev;
4421
4422 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4423 if (!dev->slot || dev->slot != slot)
4424 continue;
4425 if (dev->subordinate)
4426 pci_bus_unlock(dev->subordinate);
4427 pci_dev_unlock(dev);
4428 }
4429 }
4430
4431 /* Return 1 on successful lock, 0 on contention */
4432 static int pci_slot_trylock(struct pci_slot *slot)
4433 {
4434 struct pci_dev *dev;
4435
4436 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4437 if (!dev->slot || dev->slot != slot)
4438 continue;
4439 if (!pci_dev_trylock(dev))
4440 goto unlock;
4441 if (dev->subordinate) {
4442 if (!pci_bus_trylock(dev->subordinate)) {
4443 pci_dev_unlock(dev);
4444 goto unlock;
4445 }
4446 }
4447 }
4448 return 1;
4449
4450 unlock:
4451 list_for_each_entry_continue_reverse(dev,
4452 &slot->bus->devices, bus_list) {
4453 if (!dev->slot || dev->slot != slot)
4454 continue;
4455 if (dev->subordinate)
4456 pci_bus_unlock(dev->subordinate);
4457 pci_dev_unlock(dev);
4458 }
4459 return 0;
4460 }
4461
4462 /* Save and disable devices from the top of the tree down */
4463 static void pci_bus_save_and_disable(struct pci_bus *bus)
4464 {
4465 struct pci_dev *dev;
4466
4467 list_for_each_entry(dev, &bus->devices, bus_list) {
4468 pci_dev_save_and_disable(dev);
4469 if (dev->subordinate)
4470 pci_bus_save_and_disable(dev->subordinate);
4471 }
4472 }
4473
4474 /*
4475 * Restore devices from top of the tree down - parent bridges need to be
4476 * restored before we can get to subordinate devices.
4477 */
4478 static void pci_bus_restore(struct pci_bus *bus)
4479 {
4480 struct pci_dev *dev;
4481
4482 list_for_each_entry(dev, &bus->devices, bus_list) {
4483 pci_dev_restore(dev);
4484 if (dev->subordinate)
4485 pci_bus_restore(dev->subordinate);
4486 }
4487 }
4488
4489 /* Save and disable devices from the top of the tree down */
4490 static void pci_slot_save_and_disable(struct pci_slot *slot)
4491 {
4492 struct pci_dev *dev;
4493
4494 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4495 if (!dev->slot || dev->slot != slot)
4496 continue;
4497 pci_dev_save_and_disable(dev);
4498 if (dev->subordinate)
4499 pci_bus_save_and_disable(dev->subordinate);
4500 }
4501 }
4502
4503 /*
4504 * Restore devices from top of the tree down - parent bridges need to be
4505 * restored before we can get to subordinate devices.
4506 */
4507 static void pci_slot_restore(struct pci_slot *slot)
4508 {
4509 struct pci_dev *dev;
4510
4511 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4512 if (!dev->slot || dev->slot != slot)
4513 continue;
4514 pci_dev_restore(dev);
4515 if (dev->subordinate)
4516 pci_bus_restore(dev->subordinate);
4517 }
4518 }
4519
4520 static int pci_slot_reset(struct pci_slot *slot, int probe)
4521 {
4522 int rc;
4523
4524 if (!slot || !pci_slot_resetable(slot))
4525 return -ENOTTY;
4526
4527 if (!probe)
4528 pci_slot_lock(slot);
4529
4530 might_sleep();
4531
4532 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4533
4534 if (!probe)
4535 pci_slot_unlock(slot);
4536
4537 return rc;
4538 }
4539
4540 /**
4541 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4542 * @slot: PCI slot to probe
4543 *
4544 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4545 */
4546 int pci_probe_reset_slot(struct pci_slot *slot)
4547 {
4548 return pci_slot_reset(slot, 1);
4549 }
4550 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4551
4552 /**
4553 * pci_reset_slot - reset a PCI slot
4554 * @slot: PCI slot to reset
4555 *
4556 * A PCI bus may host multiple slots; each slot may support a reset mechanism
4557 * independent of other slots. For instance, some slots may support slot power
4558 * control. In the case of a 1:1 bus-to-slot architecture, this function may
4559 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
4560 * Generally a slot reset should be attempted before a bus reset. All of the
4561 * functions in the slot and any subordinate buses behind the slot are reset
4562 * through this function. PCI config space of all devices in the slot and
4563 * behind the slot is saved before and restored after reset.
4564 *
4565 * Return 0 on success, non-zero on error.
4566 */
4567 int pci_reset_slot(struct pci_slot *slot)
4568 {
4569 int rc;
4570
4571 rc = pci_slot_reset(slot, 1);
4572 if (rc)
4573 return rc;
4574
4575 pci_slot_save_and_disable(slot);
4576
4577 rc = pci_slot_reset(slot, 0);
4578
4579 pci_slot_restore(slot);
4580
4581 return rc;
4582 }
4583 EXPORT_SYMBOL_GPL(pci_reset_slot);
4584
4585 /**
4586 * pci_try_reset_slot - Try to reset a PCI slot
4587 * @slot: PCI slot to reset
4588 *
4589 * Same as above except return -EAGAIN if the slot cannot be locked
4590 */
4591 int pci_try_reset_slot(struct pci_slot *slot)
4592 {
4593 int rc;
4594
4595 rc = pci_slot_reset(slot, 1);
4596 if (rc)
4597 return rc;
4598
4599 pci_slot_save_and_disable(slot);
4600
4601 if (pci_slot_trylock(slot)) {
4602 might_sleep();
4603 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4604 pci_slot_unlock(slot);
4605 } else
4606 rc = -EAGAIN;
4607
4608 pci_slot_restore(slot);
4609
4610 return rc;
4611 }
4612 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4613
4614 static int pci_bus_reset(struct pci_bus *bus, int probe)
4615 {
4616 if (!bus->self || !pci_bus_resetable(bus))
4617 return -ENOTTY;
4618
4619 if (probe)
4620 return 0;
4621
4622 pci_bus_lock(bus);
4623
4624 might_sleep();
4625
4626 pci_reset_bridge_secondary_bus(bus->self);
4627
4628 pci_bus_unlock(bus);
4629
4630 return 0;
4631 }
4632
4633 /**
4634 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4635 * @bus: PCI bus to probe
4636 *
4637 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4638 */
4639 int pci_probe_reset_bus(struct pci_bus *bus)
4640 {
4641 return pci_bus_reset(bus, 1);
4642 }
4643 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4644
4645 /**
4646 * pci_reset_bus - reset a PCI bus
4647 * @bus: top level PCI bus to reset
4648 *
4649 * Do a bus reset on the given bus and any subordinate buses, saving
4650 * and restoring state of all devices.
4651 *
4652 * Return 0 on success, non-zero on error.
4653 */
4654 int pci_reset_bus(struct pci_bus *bus)
4655 {
4656 int rc;
4657
4658 rc = pci_bus_reset(bus, 1);
4659 if (rc)
4660 return rc;
4661
4662 pci_bus_save_and_disable(bus);
4663
4664 rc = pci_bus_reset(bus, 0);
4665
4666 pci_bus_restore(bus);
4667
4668 return rc;
4669 }
4670 EXPORT_SYMBOL_GPL(pci_reset_bus);
4671
4672 /**
4673 * pci_try_reset_bus - Try to reset a PCI bus
4674 * @bus: top level PCI bus to reset
4675 *
4676 * Same as above except return -EAGAIN if the bus cannot be locked
4677 */
4678 int pci_try_reset_bus(struct pci_bus *bus)
4679 {
4680 int rc;
4681
4682 rc = pci_bus_reset(bus, 1);
4683 if (rc)
4684 return rc;
4685
4686 pci_bus_save_and_disable(bus);
4687
4688 if (pci_bus_trylock(bus)) {
4689 might_sleep();
4690 pci_reset_bridge_secondary_bus(bus->self);
4691 pci_bus_unlock(bus);
4692 } else
4693 rc = -EAGAIN;
4694
4695 pci_bus_restore(bus);
4696
4697 return rc;
4698 }
4699 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4700
4701 /**
4702 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4703 * @dev: PCI device to query
4704 *
4705 * Returns mmrbc: maximum designed memory read count in bytes
4706 * or appropriate error value.
4707 */
4708 int pcix_get_max_mmrbc(struct pci_dev *dev)
4709 {
4710 int cap;
4711 u32 stat;
4712
4713 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4714 if (!cap)
4715 return -EINVAL;
4716
4717 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4718 return -EINVAL;
4719
4720 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4721 }
4722 EXPORT_SYMBOL(pcix_get_max_mmrbc);
4723
4724 /**
4725 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4726 * @dev: PCI device to query
4727 *
4728 * Returns mmrbc: maximum memory read count in bytes
4729 * or appropriate error value.
4730 */
4731 int pcix_get_mmrbc(struct pci_dev *dev)
4732 {
4733 int cap;
4734 u16 cmd;
4735
4736 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4737 if (!cap)
4738 return -EINVAL;
4739
4740 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4741 return -EINVAL;
4742
4743 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4744 }
4745 EXPORT_SYMBOL(pcix_get_mmrbc);
4746
4747 /**
4748 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4749 * @dev: PCI device to configure
4750 * @mmrbc: maximum memory read count in bytes;
4751 * valid values are 512, 1024, 2048, 4096
4752 *
4753 * If possible, sets the maximum memory read byte count; some bridges have
4754 * errata that prevent this.
4755 */
4756 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4757 {
4758 int cap;
4759 u32 stat, v, o;
4760 u16 cmd;
4761
4762 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4763 return -EINVAL;
4764
4765 v = ffs(mmrbc) - 10;
4766
4767 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4768 if (!cap)
4769 return -EINVAL;
4770
4771 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4772 return -EINVAL;
4773
4774 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4775 return -E2BIG;
4776
4777 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4778 return -EINVAL;
4779
4780 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4781 if (o != v) {
4782 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4783 return -EIO;
4784
4785 cmd &= ~PCI_X_CMD_MAX_READ;
4786 cmd |= v << 2;
4787 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4788 return -EIO;
4789 }
4790 return 0;
4791 }
4792 EXPORT_SYMBOL(pcix_set_mmrbc);
4793
4794 /**
4795 * pcie_get_readrq - get PCI Express read request size
4796 * @dev: PCI device to query
4797 *
4798 * Returns maximum memory read request in bytes
4799 * or appropriate error value.
4800 */
4801 int pcie_get_readrq(struct pci_dev *dev)
4802 {
4803 u16 ctl;
4804
4805 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4806
4807 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4808 }
4809 EXPORT_SYMBOL(pcie_get_readrq);
4810
4811 /**
4812 * pcie_set_readrq - set PCI Express maximum memory read request
4813 * @dev: PCI device to configure
4814 * @rq: maximum memory read count in bytes;
4815 * valid values are 128, 256, 512, 1024, 2048, 4096
4816 *
4817 * If possible, sets the maximum memory read request size in bytes.
4818 */
4819 int pcie_set_readrq(struct pci_dev *dev, int rq)
4820 {
4821 u16 v;
4822
4823 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4824 return -EINVAL;
4825
4826 /*
4827 * If using the "performance" PCIe config, we clamp the
4828 * read rq size to the max packet size to prevent the
4829 * host bridge generating requests larger than we can
4830 * cope with
4831 */
4832 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4833 int mps = pcie_get_mps(dev);
4834
4835 if (mps < rq)
4836 rq = mps;
4837 }
4838
4839 v = (ffs(rq) - 8) << 12;
4840
4841 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4842 PCI_EXP_DEVCTL_READRQ, v);
4843 }
4844 EXPORT_SYMBOL(pcie_set_readrq);
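
/*
 * Example (hypothetical sketch): a driver that wants larger DMA reads
 * would raise MRRS after enabling the device; values must be a power of
 * two between 128 and 4096, otherwise -EINVAL is returned.
 */
static void __maybe_unused example_tune_readrq(struct pci_dev *pdev)
{
	int old = pcie_get_readrq(pdev);

	if (pcie_set_readrq(pdev, 4096))
		dev_dbg(&pdev->dev, "keeping MRRS at %d bytes\n", old);
}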
4845
4846 /**
4847 * pcie_get_mps - get PCI Express maximum payload size
4848 * @dev: PCI device to query
4849 *
4850 * Returns maximum payload size in bytes
4851 */
4852 int pcie_get_mps(struct pci_dev *dev)
4853 {
4854 u16 ctl;
4855
4856 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4857
4858 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4859 }
4860 EXPORT_SYMBOL(pcie_get_mps);
4861
4862 /**
4863 * pcie_set_mps - set PCI Express maximum payload size
4864 * @dev: PCI device to configure
4865 * @mps: maximum payload size in bytes;
4866 * valid values are 128, 256, 512, 1024, 2048, 4096
4867 *
4868 * If possible, sets the maximum payload size.
4869 */
4870 int pcie_set_mps(struct pci_dev *dev, int mps)
4871 {
4872 u16 v;
4873
4874 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4875 return -EINVAL;
4876
4877 v = ffs(mps) - 8;
4878 if (v > dev->pcie_mpss)
4879 return -EINVAL;
4880 v <<= 5;
4881
4882 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4883 PCI_EXP_DEVCTL_PAYLOAD, v);
4884 }
4885 EXPORT_SYMBOL(pcie_set_mps);
4886
4887 /**
4888 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4889 * @dev: PCI device to query
4890 * @speed: storage for minimum speed
4891 * @width: storage for minimum width
4892 *
4893 * This function will walk up the PCI device chain and determine the minimum
4894 * link width and speed of the device.
4895 */
4896 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4897 enum pcie_link_width *width)
4898 {
4899 int ret;
4900
4901 *speed = PCI_SPEED_UNKNOWN;
4902 *width = PCIE_LNK_WIDTH_UNKNOWN;
4903
4904 while (dev) {
4905 u16 lnksta;
4906 enum pci_bus_speed next_speed;
4907 enum pcie_link_width next_width;
4908
4909 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4910 if (ret)
4911 return ret;
4912
4913 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4914 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4915 PCI_EXP_LNKSTA_NLW_SHIFT;
4916
4917 if (next_speed < *speed)
4918 *speed = next_speed;
4919
4920 if (next_width < *width)
4921 *width = next_width;
4922
4923 dev = dev->bus->self;
4924 }
4925
4926 return 0;
4927 }
4928 EXPORT_SYMBOL(pcie_get_minimum_link);
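
/*
 * Example (hypothetical sketch): a high-throughput driver can warn when
 * the slowest link on the path to the root limits its bandwidth. The
 * speed/width thresholds below are illustrative, not from this file.
 */
static void __maybe_unused example_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;

	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is narrower/slower than expected\n");
}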
4929
4930 /**
4931 * pci_select_bars - Make BAR mask from the type of resource
4932 * @dev: the PCI device for which BAR mask is made
4933 * @flags: resource type mask to be selected
4934 *
4935 * This helper routine makes a BAR mask from the type of resource.
4936 */
4937 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4938 {
4939 int i, bars = 0;
4940 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4941 if (pci_resource_flags(dev, i) & flags)
4942 bars |= (1 << i);
4943 return bars;
4944 }
4945 EXPORT_SYMBOL(pci_select_bars);
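
/*
 * Example (hypothetical sketch): pci_select_bars() is typically paired
 * with pci_request_selected_regions() to claim only the MMIO BARs.
 * "example-driver" is a placeholder resource name.
 */
static int __maybe_unused example_request_mmio(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}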
4946
4947 /* Some architectures require additional programming to enable VGA */
4948 static arch_set_vga_state_t arch_set_vga_state;
4949
4950 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4951 {
4952 arch_set_vga_state = func; /* NULL disables */
4953 }
4954
4955 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4956 unsigned int command_bits, u32 flags)
4957 {
4958 if (arch_set_vga_state)
4959 return arch_set_vga_state(dev, decode, command_bits,
4960 flags);
4961 return 0;
4962 }
4963
4964 /**
4965 * pci_set_vga_state - set VGA decode state on device and parents if requested
4966 * @dev: the PCI device
4967 * @decode: true = enable decoding, false = disable decoding
4968 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4969 * @flags: traverse ancestors and change bridges
4970 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
4971 */
4972 int pci_set_vga_state(struct pci_dev *dev, bool decode,
4973 unsigned int command_bits, u32 flags)
4974 {
4975 struct pci_bus *bus;
4976 struct pci_dev *bridge;
4977 u16 cmd;
4978 int rc;
4979
4980 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4981
4982 /* ARCH specific VGA enables */
4983 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4984 if (rc)
4985 return rc;
4986
4987 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4988 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4989 if (decode)
4990 cmd |= command_bits;
4991 else
4992 cmd &= ~command_bits;
4993 pci_write_config_word(dev, PCI_COMMAND, cmd);
4994 }
4995
4996 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4997 return 0;
4998
4999 bus = dev->bus;
5000 while (bus) {
5001 bridge = bus->self;
5002 if (bridge) {
5003 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5004 &cmd);
5005 if (decode)
5006 cmd |= PCI_BRIDGE_CTL_VGA;
5007 else
5008 cmd &= ~PCI_BRIDGE_CTL_VGA;
5009 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5010 cmd);
5011 }
5012 bus = bus->parent;
5013 }
5014 return 0;
5015 }
5016
5017 /**
5018 * pci_add_dma_alias - Add a DMA devfn alias for a device
5019 * @dev: the PCI device for which alias is added
5020 * @devfn: alias slot and function
5021 *
5022 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
5023 * It should be called early, preferably as a PCI fixup header quirk.
5024 */
5025 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5026 {
5027 if (!dev->dma_alias_mask)
5028 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5029 sizeof(long), GFP_KERNEL);
5030 if (!dev->dma_alias_mask) {
5031 dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
5032 return;
5033 }
5034
5035 set_bit(devfn, dev->dma_alias_mask);
5036 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
5037 PCI_SLOT(devfn), PCI_FUNC(devfn));
5038 }
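
/*
 * Example (hypothetical sketch): a header fixup quirk for a multifunction
 * device whose function 1 issues DMA using function 0's requester ID.
 * The commented-out vendor/device IDs are placeholders, not real quirks.
 */
static void __maybe_unused example_dma_alias_quirk(struct pci_dev *pdev)
{
	/* Report function 0 of the same slot as a DMA alias. */
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
}
/* DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_dma_alias_quirk); */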
5039
5040 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5041 {
5042 return (dev1->dma_alias_mask &&
5043 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5044 (dev2->dma_alias_mask &&
5045 test_bit(dev1->devfn, dev2->dma_alias_mask));
5046 }
5047
5048 bool pci_device_is_present(struct pci_dev *pdev)
5049 {
5050 u32 v;
5051
5052 if (pci_dev_is_disconnected(pdev))
5053 return false;
5054 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5055 }
5056 EXPORT_SYMBOL_GPL(pci_device_is_present);
5057
5058 void pci_ignore_hotplug(struct pci_dev *dev)
5059 {
5060 struct pci_dev *bridge = dev->bus->self;
5061
5062 dev->ignore_hotplug = 1;
5063 /* Propagate the "ignore hotplug" setting to the parent bridge. */
5064 if (bridge)
5065 bridge->ignore_hotplug = 1;
5066 }
5067 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5068
5069 resource_size_t __weak pcibios_default_alignment(void)
5070 {
5071 return 0;
5072 }
5073
5074 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5075 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5076 static DEFINE_SPINLOCK(resource_alignment_lock);
5077
5078 /**
5079 * pci_specified_resource_alignment - get resource alignment specified by user.
5080 * @dev: the PCI device to get
5081 * @resize: whether or not to change resources' size when reassigning alignment
5082 *
5083 * RETURNS: Resource alignment if it is specified.
5084 * Zero if it is not specified.
5085 */
5086 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5087 bool *resize)
5088 {
5089 int seg, bus, slot, func, align_order, count;
5090 unsigned short vendor, device, subsystem_vendor, subsystem_device;
5091 resource_size_t align = pcibios_default_alignment();
5092 char *p;
5093
5094 spin_lock(&resource_alignment_lock);
5095 p = resource_alignment_param;
5096 if (!*p && !align)
5097 goto out;
5098 if (pci_has_flag(PCI_PROBE_ONLY)) {
5099 align = 0;
5100 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5101 goto out;
5102 }
5103
5104 while (*p) {
5105 count = 0;
5106 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5107 p[count] == '@') {
5108 p += count + 1;
5109 } else {
5110 align_order = -1;
5111 }
5112 if (strncmp(p, "pci:", 4) == 0) {
5113 /* PCI vendor/device (subvendor/subdevice) ids are specified */
5114 p += 4;
5115 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5116 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5117 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5118 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5119 p);
5120 break;
5121 }
5122 subsystem_vendor = subsystem_device = 0;
5123 }
5124 p += count;
5125 if ((!vendor || (vendor == dev->vendor)) &&
5126 (!device || (device == dev->device)) &&
5127 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5128 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5129 *resize = true;
5130 if (align_order == -1)
5131 align = PAGE_SIZE;
5132 else
5133 align = 1 << align_order;
5134 /* Found */
5135 break;
5136 }
5137 }
5138 else {
5139 if (sscanf(p, "%x:%x:%x.%x%n",
5140 &seg, &bus, &slot, &func, &count) != 4) {
5141 seg = 0;
5142 if (sscanf(p, "%x:%x.%x%n",
5143 &bus, &slot, &func, &count) != 3) {
5144 /* Invalid format */
5145 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5146 p);
5147 break;
5148 }
5149 }
5150 p += count;
5151 if (seg == pci_domain_nr(dev->bus) &&
5152 bus == dev->bus->number &&
5153 slot == PCI_SLOT(dev->devfn) &&
5154 func == PCI_FUNC(dev->devfn)) {
5155 *resize = true;
5156 if (align_order == -1)
5157 align = PAGE_SIZE;
5158 else
5159 align = 1 << align_order;
5160 /* Found */
5161 break;
5162 }
5163 }
5164 if (*p != ';' && *p != ',') {
5165 /* End of param or invalid format */
5166 break;
5167 }
5168 p++;
5169 }
5170 out:
5171 spin_unlock(&resource_alignment_lock);
5172 return align;
5173 }
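
/*
 * Example formats accepted by the parser above (a sketch derived from the
 * code; the authoritative syntax is documented with the kernel parameters
 * in Documentation/admin-guide/kernel-parameters.txt):
 *
 *   pci=resource_alignment=20@08:05.0      align device 08:05.0 to 2^20
 *   pci=resource_alignment=pci:8086:9c35   match by vendor:device ID
 *
 * Without an explicit "<order>@" prefix the parser falls back to
 * PAGE_SIZE alignment.
 */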
5174
5175 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5176 resource_size_t align, bool resize)
5177 {
5178 struct resource *r = &dev->resource[bar];
5179 resource_size_t size;
5180
5181 if (!(r->flags & IORESOURCE_MEM))
5182 return;
5183
5184 if (r->flags & IORESOURCE_PCI_FIXED) {
5185 dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5186 bar, r, (unsigned long long)align);
5187 return;
5188 }
5189
5190 size = resource_size(r);
5191 if (size >= align)
5192 return;
5193
5194 /*
5195 * Increase the alignment of the resource. There are two ways we
5196 * can do this:
5197 *
5198 * 1) Increase the size of the resource. BARs are aligned on their
5199 * size, so when we reallocate space for this resource, we'll
5200 * allocate it with the larger alignment. This also prevents
5201 * assignment of any other BARs inside the alignment region, so
5202 * if we're requesting page alignment, this means no other BARs
5203 * will share the page.
5204 *
5205 * The disadvantage is that this makes the resource larger than
5206 * the hardware BAR, which may break drivers that compute things
5207 * based on the resource size, e.g., to find registers at a
5208 * fixed offset before the end of the BAR.
5209 *
5210 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
5211 * set r->start to the desired alignment. By itself this
5212 * doesn't prevent other BARs being put inside the alignment
5213 * region, but if we realign *every* resource of every device in
5214 * the system, none of them will share an alignment region.
5215 *
5216 * When the user has requested alignment for only some devices via
5217 * the "pci=resource_alignment" argument, "resize" is true and we
5218 * use the first method. Otherwise we assume we're aligning all
5219 * devices and we use the second.
5220 */
5221
5222 dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
5223 bar, r, (unsigned long long)align);
5224
5225 if (resize) {
5226 r->start = 0;
5227 r->end = align - 1;
5228 } else {
5229 r->flags &= ~IORESOURCE_SIZEALIGN;
5230 r->flags |= IORESOURCE_STARTALIGN;
5231 r->start = align;
5232 r->end = r->start + size - 1;
5233 }
5234 r->flags |= IORESOURCE_UNSET;
5235 }
5236
5237 /*
5238 * This function disables memory decoding and releases memory resources
5239 * of the device specified by the kernel boot parameter 'pci=resource_alignment='.
5240 * It also rounds up the size to the specified alignment.
5241 * Later on, the kernel will assign the page-aligned memory resource back
5242 * to the device.
5243 */
5244 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5245 {
5246 int i;
5247 struct resource *r;
5248 resource_size_t align;
5249 u16 command;
5250 bool resize = false;
5251
5252 /*
5253 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5254 * 3.4.1.11. Their resources are allocated from the space
5255 * described by the VF BARx register in the PF's SR-IOV capability.
5256 * We can't influence their alignment here.
5257 */
5258 if (dev->is_virtfn)
5259 return;
5260
5261 /* check if specified PCI is target device to reassign */
5262 align = pci_specified_resource_alignment(dev, &resize);
5263 if (!align)
5264 return;
5265
5266 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5267 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5268 dev_warn(&dev->dev,
5269 "Can't reassign resources to host bridge.\n");
5270 return;
5271 }
5272
5273 dev_info(&dev->dev,
5274 "Disabling memory decoding and releasing memory resources.\n");
5275 pci_read_config_word(dev, PCI_COMMAND, &command);
5276 command &= ~PCI_COMMAND_MEMORY;
5277 pci_write_config_word(dev, PCI_COMMAND, command);
5278
5279 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5280 pci_request_resource_alignment(dev, i, align, resize);
5281
5282 /*
5283 * Need to disable bridge's resource window,
5284 * to enable the kernel to reassign new resource
5285 * window later on.
5286 */
5287 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5288 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5289 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5290 r = &dev->resource[i];
5291 if (!(r->flags & IORESOURCE_MEM))
5292 continue;
5293 r->flags |= IORESOURCE_UNSET;
5294 r->end = resource_size(r) - 1;
5295 r->start = 0;
5296 }
5297 pci_disable_bridge_window(dev);
5298 }
5299 }
5300
5301 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5302 {
5303 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5304 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5305 spin_lock(&resource_alignment_lock);
5306 strncpy(resource_alignment_param, buf, count);
5307 resource_alignment_param[count] = '\0';
5308 spin_unlock(&resource_alignment_lock);
5309 return count;
5310 }
5311
5312 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5313 {
5314 size_t count;
5315 spin_lock(&resource_alignment_lock);
5316 count = snprintf(buf, size, "%s", resource_alignment_param);
5317 spin_unlock(&resource_alignment_lock);
5318 return count;
5319 }
5320
5321 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5322 {
5323 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5324 }
5325
5326 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5327 const char *buf, size_t count)
5328 {
5329 return pci_set_resource_alignment_param(buf, count);
5330 }
5331
5332 static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5333 pci_resource_alignment_store);
5334
5335 static int __init pci_resource_alignment_sysfs_init(void)
5336 {
5337 return bus_create_file(&pci_bus_type,
5338 &bus_attr_resource_alignment);
5339 }
5340 late_initcall(pci_resource_alignment_sysfs_init);
5341
5342 static void pci_no_domains(void)
5343 {
5344 #ifdef CONFIG_PCI_DOMAINS
5345 pci_domains_supported = 0;
5346 #endif
5347 }
5348
5349 #ifdef CONFIG_PCI_DOMAINS
5350 static atomic_t __domain_nr = ATOMIC_INIT(-1);
5351
5352 int pci_get_new_domain_nr(void)
5353 {
5354 return atomic_inc_return(&__domain_nr);
5355 }
5356
5357 #ifdef CONFIG_PCI_DOMAINS_GENERIC
5358 static int of_pci_bus_find_domain_nr(struct device *parent)
5359 {
5360 static int use_dt_domains = -1;
5361 int domain = -1;
5362
5363 if (parent)
5364 domain = of_get_pci_domain_nr(parent->of_node);
5365 /*
5366 * Check DT domain and use_dt_domains values.
5367 *
5368 * If DT domain property is valid (domain >= 0) and
5369 * use_dt_domains != 0, the DT assignment is valid since this means
5370 * we have not previously allocated a domain number by using
5371 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5372 * 1, to indicate that we have just assigned a domain number from
5373 * DT.
5374 *
5375 * If the DT domain property value is not valid (i.e. domain < 0), and we
5376 * have not previously assigned a domain number from DT
5377 * (use_dt_domains != 1) we should assign a domain number by
5378 * using the:
5379 *
5380 * pci_get_new_domain_nr()
5381 *
5382 * API and update the use_dt_domains value to keep track of method we
5383 * are using to assign domain numbers (use_dt_domains = 0).
5384 *
5385 * All other combinations imply we have a platform that is trying
5386 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5387 * which is a recipe for domain mishandling and it is prevented by
5388 * invalidating the domain value (domain = -1) and printing a
5389 * corresponding error.
5390 */
5391 if (domain >= 0 && use_dt_domains) {
5392 use_dt_domains = 1;
5393 } else if (domain < 0 && use_dt_domains != 1) {
5394 use_dt_domains = 0;
5395 domain = pci_get_new_domain_nr();
5396 } else {
5397 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
5398 parent->of_node->full_name);
5399 domain = -1;
5400 }
5401
5402 return domain;
5403 }
5404
5405 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5406 {
5407 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5408 acpi_pci_bus_find_domain_nr(bus);
5409 }
5410 #endif
5411 #endif
5412
5413 /**
5414 * pci_ext_cfg_avail - can we access extended PCI config space?
5415 *
5416 * Returns 1 if we can access PCI extended config space (offsets
5417 * greater than 0xff). This is the default implementation. Architecture
5418 * implementations can override this.
5419 */
5420 int __weak pci_ext_cfg_avail(void)
5421 {
5422 return 1;
5423 }
5424
5425 void __weak pci_fixup_cardbus(struct pci_bus *bus)
5426 {
5427 }
5428 EXPORT_SYMBOL(pci_fixup_cardbus);
5429
5430 static int __init pci_setup(char *str)
5431 {
5432 while (str) {
5433 char *k = strchr(str, ',');
5434 if (k)
5435 *k++ = 0;
5436 if (*str && (str = pcibios_setup(str)) && *str) {
5437 if (!strcmp(str, "nomsi")) {
5438 pci_no_msi();
5439 } else if (!strcmp(str, "noaer")) {
5440 pci_no_aer();
5441 } else if (!strncmp(str, "realloc=", 8)) {
5442 pci_realloc_get_opt(str + 8);
5443 } else if (!strncmp(str, "realloc", 7)) {
5444 pci_realloc_get_opt("on");
5445 } else if (!strcmp(str, "nodomains")) {
5446 pci_no_domains();
5447 } else if (!strncmp(str, "noari", 5)) {
5448 pcie_ari_disabled = true;
5449 } else if (!strncmp(str, "cbiosize=", 9)) {
5450 pci_cardbus_io_size = memparse(str + 9, &str);
5451 } else if (!strncmp(str, "cbmemsize=", 10)) {
5452 pci_cardbus_mem_size = memparse(str + 10, &str);
5453 } else if (!strncmp(str, "resource_alignment=", 19)) {
5454 pci_set_resource_alignment_param(str + 19,
5455 strlen(str + 19));
5456 } else if (!strncmp(str, "ecrc=", 5)) {
5457 pcie_ecrc_get_policy(str + 5);
5458 } else if (!strncmp(str, "hpiosize=", 9)) {
5459 pci_hotplug_io_size = memparse(str + 9, &str);
5460 } else if (!strncmp(str, "hpmemsize=", 10)) {
5461 pci_hotplug_mem_size = memparse(str + 10, &str);
5462 } else if (!strncmp(str, "hpbussize=", 10)) {
5463 pci_hotplug_bus_size =
5464 simple_strtoul(str + 10, &str, 0);
5465 if (pci_hotplug_bus_size > 0xff)
5466 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5467 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5468 pcie_bus_config = PCIE_BUS_TUNE_OFF;
5469 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5470 pcie_bus_config = PCIE_BUS_SAFE;
5471 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5472 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5473 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5474 pcie_bus_config = PCIE_BUS_PEER2PEER;
5475 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5476 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5477 } else {
5478 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5479 str);
5480 }
5481 }
5482 str = k;
5483 }
5484 return 0;
5485 }
5486 early_param("pci", pci_setup);
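
/*
 * Example (illustrative): combining several of the options handled above
 * on the kernel command line; the parser splits on ',' and hands each
 * token to the matching branch:
 *
 *   pci=nomsi,hpmemsize=8M,pcie_bus_safe,resource_alignment=pci:8086:9c35
 */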