]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/pci/pci.c
PCI / PM: Always check PME wakeup capability for runtime wakeup support
[mirror_ubuntu-artful-kernel.git] / drivers / pci / pci.c
1 /*
2 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10 #include <linux/acpi.h>
11 #include <linux/kernel.h>
12 #include <linux/delay.h>
13 #include <linux/dmi.h>
14 #include <linux/init.h>
15 #include <linux/of.h>
16 #include <linux/of_pci.h>
17 #include <linux/pci.h>
18 #include <linux/pm.h>
19 #include <linux/slab.h>
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22 #include <linux/string.h>
23 #include <linux/log2.h>
24 #include <linux/pci-aspm.h>
25 #include <linux/pm_wakeup.h>
26 #include <linux/interrupt.h>
27 #include <linux/device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/pci_hotplug.h>
30 #include <linux/vmalloc.h>
31 #include <linux/pci-ats.h>
32 #include <asm/setup.h>
33 #include <asm/dma.h>
34 #include <linux/aer.h>
35 #include "pci.h"
36
37 const char *pci_power_names[] = {
38 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
39 };
40 EXPORT_SYMBOL_GPL(pci_power_names);
41
42 int isa_dma_bridge_buggy;
43 EXPORT_SYMBOL(isa_dma_bridge_buggy);
44
45 int pci_pci_problems;
46 EXPORT_SYMBOL(pci_pci_problems);
47
48 unsigned int pci_pm_d3_delay;
49
50 static void pci_pme_list_scan(struct work_struct *work);
51
52 static LIST_HEAD(pci_pme_list);
53 static DEFINE_MUTEX(pci_pme_list_mutex);
54 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
55
56 struct pci_pme_device {
57 struct list_head list;
58 struct pci_dev *dev;
59 };
60
61 #define PME_TIMEOUT 1000 /* How long between PME checks */
62
63 static void pci_dev_d3_sleep(struct pci_dev *dev)
64 {
65 unsigned int delay = dev->d3_delay;
66
67 if (delay < pci_pm_d3_delay)
68 delay = pci_pm_d3_delay;
69
70 if (delay)
71 msleep(delay);
72 }
73
74 #ifdef CONFIG_PCI_DOMAINS
75 int pci_domains_supported = 1;
76 #endif
77
78 #define DEFAULT_CARDBUS_IO_SIZE (256)
79 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
80 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
81 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
82 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
83
84 #define DEFAULT_HOTPLUG_IO_SIZE (256)
85 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
86 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
87 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
88 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
89
90 #define DEFAULT_HOTPLUG_BUS_SIZE 1
91 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
92
93 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
94
95 /*
96 * The default CLS is used if arch didn't set CLS explicitly and not
97 * all pci devices agree on the same value. Arch can override either
98 * the dfl or actual value as it sees fit. Don't forget this is
99 * measured in 32-bit words, not bytes.
100 */
101 u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
102 u8 pci_cache_line_size;
103
104 /*
105 * If we set up a device for bus mastering, we need to check the latency
106 * timer as certain BIOSes forget to set it properly.
107 */
108 unsigned int pcibios_max_latency = 255;
109
110 /* If set, the PCIe ARI capability will not be used. */
111 static bool pcie_ari_disabled;
112
113 /* Disable bridge_d3 for all PCIe ports */
114 static bool pci_bridge_d3_disable;
115 /* Force bridge_d3 for all PCIe ports */
116 static bool pci_bridge_d3_force;
117
118 static int __init pcie_port_pm_setup(char *str)
119 {
120 if (!strcmp(str, "off"))
121 pci_bridge_d3_disable = true;
122 else if (!strcmp(str, "force"))
123 pci_bridge_d3_force = true;
124 return 1;
125 }
126 __setup("pcie_port_pm=", pcie_port_pm_setup);
127
128 /**
129 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
130 * @bus: pointer to PCI bus structure to search
131 *
132 * Given a PCI bus, returns the highest PCI bus number present in the set
133 * including the given PCI bus and its list of child PCI buses.
134 */
135 unsigned char pci_bus_max_busnr(struct pci_bus *bus)
136 {
137 struct pci_bus *tmp;
138 unsigned char max, n;
139
140 max = bus->busn_res.end;
141 list_for_each_entry(tmp, &bus->children, node) {
142 n = pci_bus_max_busnr(tmp);
143 if (n > max)
144 max = n;
145 }
146 return max;
147 }
148 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
149
150 #ifdef CONFIG_HAS_IOMEM
151 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
152 {
153 struct resource *res = &pdev->resource[bar];
154
155 /*
156 * Make sure the BAR is actually a memory resource, not an IO resource
157 */
158 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
159 dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
160 return NULL;
161 }
162 return ioremap_nocache(res->start, resource_size(res));
163 }
164 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
165
166 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
167 {
168 /*
169 * Make sure the BAR is actually a memory resource, not an IO resource
170 */
171 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
172 WARN_ON(1);
173 return NULL;
174 }
175 return ioremap_wc(pci_resource_start(pdev, bar),
176 pci_resource_len(pdev, bar));
177 }
178 EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
179 #endif
180
181
182 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
183 u8 pos, int cap, int *ttl)
184 {
185 u8 id;
186 u16 ent;
187
188 pci_bus_read_config_byte(bus, devfn, pos, &pos);
189
190 while ((*ttl)--) {
191 if (pos < 0x40)
192 break;
193 pos &= ~3;
194 pci_bus_read_config_word(bus, devfn, pos, &ent);
195
196 id = ent & 0xff;
197 if (id == 0xff)
198 break;
199 if (id == cap)
200 return pos;
201 pos = (ent >> 8);
202 }
203 return 0;
204 }
205
206 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
207 u8 pos, int cap)
208 {
209 int ttl = PCI_FIND_CAP_TTL;
210
211 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
212 }
213
214 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
215 {
216 return __pci_find_next_cap(dev->bus, dev->devfn,
217 pos + PCI_CAP_LIST_NEXT, cap);
218 }
219 EXPORT_SYMBOL_GPL(pci_find_next_capability);
220
221 static int __pci_bus_find_cap_start(struct pci_bus *bus,
222 unsigned int devfn, u8 hdr_type)
223 {
224 u16 status;
225
226 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
227 if (!(status & PCI_STATUS_CAP_LIST))
228 return 0;
229
230 switch (hdr_type) {
231 case PCI_HEADER_TYPE_NORMAL:
232 case PCI_HEADER_TYPE_BRIDGE:
233 return PCI_CAPABILITY_LIST;
234 case PCI_HEADER_TYPE_CARDBUS:
235 return PCI_CB_CAPABILITY_LIST;
236 }
237
238 return 0;
239 }
240
241 /**
242 * pci_find_capability - query for devices' capabilities
243 * @dev: PCI device to query
244 * @cap: capability code
245 *
246 * Tell if a device supports a given PCI capability.
247 * Returns the address of the requested capability structure within the
248 * device's PCI configuration space or 0 in case the device does not
249 * support it. Possible values for @cap:
250 *
251 * %PCI_CAP_ID_PM Power Management
252 * %PCI_CAP_ID_AGP Accelerated Graphics Port
253 * %PCI_CAP_ID_VPD Vital Product Data
254 * %PCI_CAP_ID_SLOTID Slot Identification
255 * %PCI_CAP_ID_MSI Message Signalled Interrupts
256 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
257 * %PCI_CAP_ID_PCIX PCI-X
258 * %PCI_CAP_ID_EXP PCI Express
259 */
260 int pci_find_capability(struct pci_dev *dev, int cap)
261 {
262 int pos;
263
264 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
265 if (pos)
266 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
267
268 return pos;
269 }
270 EXPORT_SYMBOL(pci_find_capability);
271
272 /**
273 * pci_bus_find_capability - query for devices' capabilities
274 * @bus: the PCI bus to query
275 * @devfn: PCI device to query
276 * @cap: capability code
277 *
278 * Like pci_find_capability() but works for pci devices that do not have a
279 * pci_dev structure set up yet.
280 *
281 * Returns the address of the requested capability structure within the
282 * device's PCI configuration space or 0 in case the device does not
283 * support it.
284 */
285 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
286 {
287 int pos;
288 u8 hdr_type;
289
290 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
291
292 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
293 if (pos)
294 pos = __pci_find_next_cap(bus, devfn, pos, cap);
295
296 return pos;
297 }
298 EXPORT_SYMBOL(pci_bus_find_capability);
299
300 /**
301 * pci_find_next_ext_capability - Find an extended capability
302 * @dev: PCI device to query
303 * @start: address at which to start looking (0 to start at beginning of list)
304 * @cap: capability code
305 *
306 * Returns the address of the next matching extended capability structure
307 * within the device's PCI configuration space or 0 if the device does
308 * not support it. Some capabilities can occur several times, e.g., the
309 * vendor-specific capability, and this provides a way to find them all.
310 */
311 int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
312 {
313 u32 header;
314 int ttl;
315 int pos = PCI_CFG_SPACE_SIZE;
316
317 /* minimum 8 bytes per capability */
318 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
319
320 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
321 return 0;
322
323 if (start)
324 pos = start;
325
326 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
327 return 0;
328
329 /*
330 * If we have no capabilities, this is indicated by cap ID,
331 * cap version and next pointer all being 0.
332 */
333 if (header == 0)
334 return 0;
335
336 while (ttl-- > 0) {
337 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
338 return pos;
339
340 pos = PCI_EXT_CAP_NEXT(header);
341 if (pos < PCI_CFG_SPACE_SIZE)
342 break;
343
344 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
345 break;
346 }
347
348 return 0;
349 }
350 EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
351
352 /**
353 * pci_find_ext_capability - Find an extended capability
354 * @dev: PCI device to query
355 * @cap: capability code
356 *
357 * Returns the address of the requested extended capability structure
358 * within the device's PCI configuration space or 0 if the device does
359 * not support it. Possible values for @cap:
360 *
361 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
362 * %PCI_EXT_CAP_ID_VC Virtual Channel
363 * %PCI_EXT_CAP_ID_DSN Device Serial Number
364 * %PCI_EXT_CAP_ID_PWR Power Budgeting
365 */
366 int pci_find_ext_capability(struct pci_dev *dev, int cap)
367 {
368 return pci_find_next_ext_capability(dev, 0, cap);
369 }
370 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
371
372 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
373 {
374 int rc, ttl = PCI_FIND_CAP_TTL;
375 u8 cap, mask;
376
377 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
378 mask = HT_3BIT_CAP_MASK;
379 else
380 mask = HT_5BIT_CAP_MASK;
381
382 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
383 PCI_CAP_ID_HT, &ttl);
384 while (pos) {
385 rc = pci_read_config_byte(dev, pos + 3, &cap);
386 if (rc != PCIBIOS_SUCCESSFUL)
387 return 0;
388
389 if ((cap & mask) == ht_cap)
390 return pos;
391
392 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
393 pos + PCI_CAP_LIST_NEXT,
394 PCI_CAP_ID_HT, &ttl);
395 }
396
397 return 0;
398 }
399 /**
400 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
401 * @dev: PCI device to query
402 * @pos: Position from which to continue searching
403 * @ht_cap: Hypertransport capability code
404 *
405 * To be used in conjunction with pci_find_ht_capability() to search for
406 * all capabilities matching @ht_cap. @pos should always be a value returned
407 * from pci_find_ht_capability().
408 *
409 * NB. To be 100% safe against broken PCI devices, the caller should take
410 * steps to avoid an infinite loop.
411 */
412 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
413 {
414 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
415 }
416 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
417
418 /**
419 * pci_find_ht_capability - query a device's Hypertransport capabilities
420 * @dev: PCI device to query
421 * @ht_cap: Hypertransport capability code
422 *
423 * Tell if a device supports a given Hypertransport capability.
424 * Returns an address within the device's PCI configuration space
425 * or 0 in case the device does not support the request capability.
426 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
427 * which has a Hypertransport capability matching @ht_cap.
428 */
429 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
430 {
431 int pos;
432
433 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
434 if (pos)
435 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
436
437 return pos;
438 }
439 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
440
441 /**
442 * pci_find_parent_resource - return resource region of parent bus of given region
443 * @dev: PCI device structure contains resources to be searched
444 * @res: child resource record for which parent is sought
445 *
446 * For given resource region of given device, return the resource
447 * region of parent bus the given region is contained in.
448 */
449 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
450 struct resource *res)
451 {
452 const struct pci_bus *bus = dev->bus;
453 struct resource *r;
454 int i;
455
456 pci_bus_for_each_resource(bus, r, i) {
457 if (!r)
458 continue;
459 if (resource_contains(r, res)) {
460
461 /*
462 * If the window is prefetchable but the BAR is
463 * not, the allocator made a mistake.
464 */
465 if (r->flags & IORESOURCE_PREFETCH &&
466 !(res->flags & IORESOURCE_PREFETCH))
467 return NULL;
468
469 /*
470 * If we're below a transparent bridge, there may
471 * be both a positively-decoded aperture and a
472 * subtractively-decoded region that contain the BAR.
473 * We want the positively-decoded one, so this depends
474 * on pci_bus_for_each_resource() giving us those
475 * first.
476 */
477 return r;
478 }
479 }
480 return NULL;
481 }
482 EXPORT_SYMBOL(pci_find_parent_resource);
483
484 /**
485 * pci_find_resource - Return matching PCI device resource
486 * @dev: PCI device to query
487 * @res: Resource to look for
488 *
489 * Goes over standard PCI resources (BARs) and checks if the given resource
490 * is partially or fully contained in any of them. In that case the
491 * matching resource is returned, %NULL otherwise.
492 */
493 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
494 {
495 int i;
496
497 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
498 struct resource *r = &dev->resource[i];
499
500 if (r->start && resource_contains(r, res))
501 return r;
502 }
503
504 return NULL;
505 }
506 EXPORT_SYMBOL(pci_find_resource);
507
508 /**
509 * pci_find_pcie_root_port - return PCIe Root Port
510 * @dev: PCI device to query
511 *
512 * Traverse up the parent chain and return the PCIe Root Port PCI Device
513 * for a given PCI Device.
514 */
515 struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
516 {
517 struct pci_dev *bridge, *highest_pcie_bridge = dev;
518
519 bridge = pci_upstream_bridge(dev);
520 while (bridge && pci_is_pcie(bridge)) {
521 highest_pcie_bridge = bridge;
522 bridge = pci_upstream_bridge(bridge);
523 }
524
525 if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
526 return NULL;
527
528 return highest_pcie_bridge;
529 }
530 EXPORT_SYMBOL(pci_find_pcie_root_port);
531
532 /**
533 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
534 * @dev: the PCI device to operate on
535 * @pos: config space offset of status word
536 * @mask: mask of bit(s) to care about in status word
537 *
538 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
539 */
540 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
541 {
542 int i;
543
544 /* Wait for Transaction Pending bit clean */
545 for (i = 0; i < 4; i++) {
546 u16 status;
547 if (i)
548 msleep((1 << (i - 1)) * 100);
549
550 pci_read_config_word(dev, pos, &status);
551 if (!(status & mask))
552 return 1;
553 }
554
555 return 0;
556 }
557
558 /**
559 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
560 * @dev: PCI device to have its BARs restored
561 *
562 * Restore the BAR values for a given device, so as to make it
563 * accessible by its driver.
564 */
565 static void pci_restore_bars(struct pci_dev *dev)
566 {
567 int i;
568
569 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
570 pci_update_resource(dev, i);
571 }
572
573 static const struct pci_platform_pm_ops *pci_platform_pm;
574
575 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
576 {
577 if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
578 !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
579 return -EINVAL;
580 pci_platform_pm = ops;
581 return 0;
582 }
583
584 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
585 {
586 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
587 }
588
589 static inline int platform_pci_set_power_state(struct pci_dev *dev,
590 pci_power_t t)
591 {
592 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
593 }
594
595 static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
596 {
597 return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
598 }
599
600 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
601 {
602 return pci_platform_pm ?
603 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
604 }
605
606 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
607 {
608 return pci_platform_pm ?
609 pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
610 }
611
612 static inline bool platform_pci_need_resume(struct pci_dev *dev)
613 {
614 return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
615 }
616
617 /**
618 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
619 * given PCI device
620 * @dev: PCI device to handle.
621 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
622 *
623 * RETURN VALUE:
624 * -EINVAL if the requested state is invalid.
625 * -EIO if device does not support PCI PM or its PM capabilities register has a
626 * wrong version, or device doesn't support the requested state.
627 * 0 if device already is in the requested state.
628 * 0 if device's power state has been successfully changed.
629 */
630 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
631 {
632 u16 pmcsr;
633 bool need_restore = false;
634
635 /* Check if we're already there */
636 if (dev->current_state == state)
637 return 0;
638
639 if (!dev->pm_cap)
640 return -EIO;
641
642 if (state < PCI_D0 || state > PCI_D3hot)
643 return -EINVAL;
644
645 /* Validate current state:
646 * Can enter D0 from any state, but if we can only go deeper
647 * to sleep if we're already in a low power state
648 */
649 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
650 && dev->current_state > state) {
651 dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
652 dev->current_state, state);
653 return -EINVAL;
654 }
655
656 /* check if this device supports the desired state */
657 if ((state == PCI_D1 && !dev->d1_support)
658 || (state == PCI_D2 && !dev->d2_support))
659 return -EIO;
660
661 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
662
663 /* If we're (effectively) in D3, force entire word to 0.
664 * This doesn't affect PME_Status, disables PME_En, and
665 * sets PowerState to 0.
666 */
667 switch (dev->current_state) {
668 case PCI_D0:
669 case PCI_D1:
670 case PCI_D2:
671 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
672 pmcsr |= state;
673 break;
674 case PCI_D3hot:
675 case PCI_D3cold:
676 case PCI_UNKNOWN: /* Boot-up */
677 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
678 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
679 need_restore = true;
680 /* Fall-through: force to D0 */
681 default:
682 pmcsr = 0;
683 break;
684 }
685
686 /* enter specified state */
687 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
688
689 /* Mandatory power management transition delays */
690 /* see PCI PM 1.1 5.6.1 table 18 */
691 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
692 pci_dev_d3_sleep(dev);
693 else if (state == PCI_D2 || dev->current_state == PCI_D2)
694 udelay(PCI_PM_D2_DELAY);
695
696 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
697 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
698 if (dev->current_state != state && printk_ratelimit())
699 dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
700 dev->current_state);
701
702 /*
703 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
704 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
705 * from D3hot to D0 _may_ perform an internal reset, thereby
706 * going to "D0 Uninitialized" rather than "D0 Initialized".
707 * For example, at least some versions of the 3c905B and the
708 * 3c556B exhibit this behaviour.
709 *
710 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
711 * devices in a D3hot state at boot. Consequently, we need to
712 * restore at least the BARs so that the device will be
713 * accessible to its driver.
714 */
715 if (need_restore)
716 pci_restore_bars(dev);
717
718 if (dev->bus->self)
719 pcie_aspm_pm_state_change(dev->bus->self);
720
721 return 0;
722 }
723
724 /**
725 * pci_update_current_state - Read power state of given device and cache it
726 * @dev: PCI device to handle.
727 * @state: State to cache in case the device doesn't have the PM capability
728 *
729 * The power state is read from the PMCSR register, which however is
730 * inaccessible in D3cold. The platform firmware is therefore queried first
731 * to detect accessibility of the register. In case the platform firmware
732 * reports an incorrect state or the device isn't power manageable by the
733 * platform at all, we try to detect D3cold by testing accessibility of the
734 * vendor ID in config space.
735 */
736 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
737 {
738 if (platform_pci_get_power_state(dev) == PCI_D3cold ||
739 !pci_device_is_present(dev)) {
740 dev->current_state = PCI_D3cold;
741 } else if (dev->pm_cap) {
742 u16 pmcsr;
743
744 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
745 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
746 } else {
747 dev->current_state = state;
748 }
749 }
750
751 /**
752 * pci_power_up - Put the given device into D0 forcibly
753 * @dev: PCI device to power up
754 */
755 void pci_power_up(struct pci_dev *dev)
756 {
757 if (platform_pci_power_manageable(dev))
758 platform_pci_set_power_state(dev, PCI_D0);
759
760 pci_raw_set_power_state(dev, PCI_D0);
761 pci_update_current_state(dev, PCI_D0);
762 }
763
764 /**
765 * pci_platform_power_transition - Use platform to change device power state
766 * @dev: PCI device to handle.
767 * @state: State to put the device into.
768 */
769 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
770 {
771 int error;
772
773 if (platform_pci_power_manageable(dev)) {
774 error = platform_pci_set_power_state(dev, state);
775 if (!error)
776 pci_update_current_state(dev, state);
777 } else
778 error = -ENODEV;
779
780 if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
781 dev->current_state = PCI_D0;
782
783 return error;
784 }
785
786 /**
787 * pci_wakeup - Wake up a PCI device
788 * @pci_dev: Device to handle.
789 * @ign: ignored parameter
790 */
791 static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
792 {
793 pci_wakeup_event(pci_dev);
794 pm_request_resume(&pci_dev->dev);
795 return 0;
796 }
797
798 /**
799 * pci_wakeup_bus - Walk given bus and wake up devices on it
800 * @bus: Top bus of the subtree to walk.
801 */
802 static void pci_wakeup_bus(struct pci_bus *bus)
803 {
804 if (bus)
805 pci_walk_bus(bus, pci_wakeup, NULL);
806 }
807
808 /**
809 * __pci_start_power_transition - Start power transition of a PCI device
810 * @dev: PCI device to handle.
811 * @state: State to put the device into.
812 */
813 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
814 {
815 if (state == PCI_D0) {
816 pci_platform_power_transition(dev, PCI_D0);
817 /*
818 * Mandatory power management transition delays, see
819 * PCI Express Base Specification Revision 2.0 Section
820 * 6.6.1: Conventional Reset. Do not delay for
821 * devices powered on/off by corresponding bridge,
822 * because have already delayed for the bridge.
823 */
824 if (dev->runtime_d3cold) {
825 if (dev->d3cold_delay)
826 msleep(dev->d3cold_delay);
827 /*
828 * When powering on a bridge from D3cold, the
829 * whole hierarchy may be powered on into
830 * D0uninitialized state, resume them to give
831 * them a chance to suspend again
832 */
833 pci_wakeup_bus(dev->subordinate);
834 }
835 }
836 }
837
838 /**
839 * __pci_dev_set_current_state - Set current state of a PCI device
840 * @dev: Device to handle
841 * @data: pointer to state to be set
842 */
843 static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
844 {
845 pci_power_t state = *(pci_power_t *)data;
846
847 dev->current_state = state;
848 return 0;
849 }
850
851 /**
852 * __pci_bus_set_current_state - Walk given bus and set current state of devices
853 * @bus: Top bus of the subtree to walk.
854 * @state: state to be set
855 */
856 static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
857 {
858 if (bus)
859 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
860 }
861
862 /**
863 * __pci_complete_power_transition - Complete power transition of a PCI device
864 * @dev: PCI device to handle.
865 * @state: State to put the device into.
866 *
867 * This function should not be called directly by device drivers.
868 */
869 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
870 {
871 int ret;
872
873 if (state <= PCI_D0)
874 return -EINVAL;
875 ret = pci_platform_power_transition(dev, state);
876 /* Power off the bridge may power off the whole hierarchy */
877 if (!ret && state == PCI_D3cold)
878 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
879 return ret;
880 }
881 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
882
883 /**
884 * pci_set_power_state - Set the power state of a PCI device
885 * @dev: PCI device to handle.
886 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
887 *
888 * Transition a device to a new power state, using the platform firmware and/or
889 * the device's PCI PM registers.
890 *
891 * RETURN VALUE:
892 * -EINVAL if the requested state is invalid.
893 * -EIO if device does not support PCI PM or its PM capabilities register has a
894 * wrong version, or device doesn't support the requested state.
895 * 0 if device already is in the requested state.
896 * 0 if device's power state has been successfully changed.
897 */
898 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
899 {
900 int error;
901
902 /* bound the state we're entering */
903 if (state > PCI_D3cold)
904 state = PCI_D3cold;
905 else if (state < PCI_D0)
906 state = PCI_D0;
907 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
908 /*
909 * If the device or the parent bridge do not support PCI PM,
910 * ignore the request if we're doing anything other than putting
911 * it into D0 (which would only happen on boot).
912 */
913 return 0;
914
915 /* Check if we're already there */
916 if (dev->current_state == state)
917 return 0;
918
919 __pci_start_power_transition(dev, state);
920
921 /* This device is quirked not to be put into D3, so
922 don't put it in D3 */
923 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
924 return 0;
925
926 /*
927 * To put device in D3cold, we put device into D3hot in native
928 * way, then put device into D3cold with platform ops
929 */
930 error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
931 PCI_D3hot : state);
932
933 if (!__pci_complete_power_transition(dev, state))
934 error = 0;
935
936 return error;
937 }
938 EXPORT_SYMBOL(pci_set_power_state);
939
940 /**
941 * pci_choose_state - Choose the power state of a PCI device
942 * @dev: PCI device to be suspended
943 * @state: target sleep state for the whole system. This is the value
944 * that is passed to suspend() function.
945 *
946 * Returns PCI power state suitable for given device and given system
947 * message.
948 */
949
950 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
951 {
952 pci_power_t ret;
953
954 if (!dev->pm_cap)
955 return PCI_D0;
956
957 ret = platform_pci_choose_state(dev);
958 if (ret != PCI_POWER_ERROR)
959 return ret;
960
961 switch (state.event) {
962 case PM_EVENT_ON:
963 return PCI_D0;
964 case PM_EVENT_FREEZE:
965 case PM_EVENT_PRETHAW:
966 /* REVISIT both freeze and pre-thaw "should" use D0 */
967 case PM_EVENT_SUSPEND:
968 case PM_EVENT_HIBERNATE:
969 return PCI_D3hot;
970 default:
971 dev_info(&dev->dev, "unrecognized suspend event %d\n",
972 state.event);
973 BUG();
974 }
975 return PCI_D0;
976 }
977 EXPORT_SYMBOL(pci_choose_state);
978
979 #define PCI_EXP_SAVE_REGS 7
980
981 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
982 u16 cap, bool extended)
983 {
984 struct pci_cap_saved_state *tmp;
985
986 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
987 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
988 return tmp;
989 }
990 return NULL;
991 }
992
993 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
994 {
995 return _pci_find_saved_cap(dev, cap, false);
996 }
997
998 struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
999 {
1000 return _pci_find_saved_cap(dev, cap, true);
1001 }
1002
1003 static int pci_save_pcie_state(struct pci_dev *dev)
1004 {
1005 int i = 0;
1006 struct pci_cap_saved_state *save_state;
1007 u16 *cap;
1008
1009 if (!pci_is_pcie(dev))
1010 return 0;
1011
1012 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1013 if (!save_state) {
1014 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
1015 return -ENOMEM;
1016 }
1017
1018 cap = (u16 *)&save_state->cap.data[0];
1019 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1020 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1021 pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1022 pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
1023 pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1024 pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1025 pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1026
1027 return 0;
1028 }
1029
1030 static void pci_restore_pcie_state(struct pci_dev *dev)
1031 {
1032 int i = 0;
1033 struct pci_cap_saved_state *save_state;
1034 u16 *cap;
1035
1036 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1037 if (!save_state)
1038 return;
1039
1040 cap = (u16 *)&save_state->cap.data[0];
1041 pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1042 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1043 pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1044 pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1045 pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1046 pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1047 pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1048 }
1049
1050
1051 static int pci_save_pcix_state(struct pci_dev *dev)
1052 {
1053 int pos;
1054 struct pci_cap_saved_state *save_state;
1055
1056 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1057 if (!pos)
1058 return 0;
1059
1060 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1061 if (!save_state) {
1062 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
1063 return -ENOMEM;
1064 }
1065
1066 pci_read_config_word(dev, pos + PCI_X_CMD,
1067 (u16 *)save_state->cap.data);
1068
1069 return 0;
1070 }
1071
1072 static void pci_restore_pcix_state(struct pci_dev *dev)
1073 {
1074 int i = 0, pos;
1075 struct pci_cap_saved_state *save_state;
1076 u16 *cap;
1077
1078 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1079 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1080 if (!save_state || !pos)
1081 return;
1082 cap = (u16 *)&save_state->cap.data[0];
1083
1084 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1085 }
1086
1087
1088 /**
1089 * pci_save_state - save the PCI configuration space of a device before suspending
1090 * @dev: - PCI device that we're dealing with
1091 */
1092 int pci_save_state(struct pci_dev *dev)
1093 {
1094 int i;
1095 /* XXX: 100% dword access ok here? */
1096 for (i = 0; i < 16; i++)
1097 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1098 dev->state_saved = true;
1099
1100 i = pci_save_pcie_state(dev);
1101 if (i != 0)
1102 return i;
1103
1104 i = pci_save_pcix_state(dev);
1105 if (i != 0)
1106 return i;
1107
1108 return pci_save_vc_state(dev);
1109 }
1110 EXPORT_SYMBOL(pci_save_state);
1111
1112 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1113 u32 saved_val, int retry)
1114 {
1115 u32 val;
1116
1117 pci_read_config_dword(pdev, offset, &val);
1118 if (val == saved_val)
1119 return;
1120
1121 for (;;) {
1122 dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1123 offset, val, saved_val);
1124 pci_write_config_dword(pdev, offset, saved_val);
1125 if (retry-- <= 0)
1126 return;
1127
1128 pci_read_config_dword(pdev, offset, &val);
1129 if (val == saved_val)
1130 return;
1131
1132 mdelay(1);
1133 }
1134 }
1135
1136 static void pci_restore_config_space_range(struct pci_dev *pdev,
1137 int start, int end, int retry)
1138 {
1139 int index;
1140
1141 for (index = end; index >= start; index--)
1142 pci_restore_config_dword(pdev, 4 * index,
1143 pdev->saved_config_space[index],
1144 retry);
1145 }
1146
1147 static void pci_restore_config_space(struct pci_dev *pdev)
1148 {
1149 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1150 pci_restore_config_space_range(pdev, 10, 15, 0);
1151 /* Restore BARs before the command register. */
1152 pci_restore_config_space_range(pdev, 4, 9, 10);
1153 pci_restore_config_space_range(pdev, 0, 3, 0);
1154 } else {
1155 pci_restore_config_space_range(pdev, 0, 15, 0);
1156 }
1157 }
1158
1159 /**
1160 * pci_restore_state - Restore the saved state of a PCI device
1161 * @dev: - PCI device that we're dealing with
1162 */
1163 void pci_restore_state(struct pci_dev *dev)
1164 {
1165 if (!dev->state_saved)
1166 return;
1167
1168 /* PCI Express register must be restored first */
1169 pci_restore_pcie_state(dev);
1170 pci_restore_pasid_state(dev);
1171 pci_restore_pri_state(dev);
1172 pci_restore_ats_state(dev);
1173 pci_restore_vc_state(dev);
1174
1175 pci_cleanup_aer_error_status_regs(dev);
1176
1177 pci_restore_config_space(dev);
1178
1179 pci_restore_pcix_state(dev);
1180 pci_restore_msi_state(dev);
1181
1182 /* Restore ACS and IOV configuration state */
1183 pci_enable_acs(dev);
1184 pci_restore_iov_state(dev);
1185
1186 dev->state_saved = false;
1187 }
1188 EXPORT_SYMBOL(pci_restore_state);
1189
1190 struct pci_saved_state {
1191 u32 config_space[16];
1192 struct pci_cap_saved_data cap[0];
1193 };
1194
1195 /**
1196 * pci_store_saved_state - Allocate and return an opaque struct containing
1197 * the device saved state.
1198 * @dev: PCI device that we're dealing with
1199 *
1200 * Return NULL if no state or error.
1201 */
1202 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1203 {
1204 struct pci_saved_state *state;
1205 struct pci_cap_saved_state *tmp;
1206 struct pci_cap_saved_data *cap;
1207 size_t size;
1208
1209 if (!dev->state_saved)
1210 return NULL;
1211
1212 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1213
1214 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1215 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1216
1217 state = kzalloc(size, GFP_KERNEL);
1218 if (!state)
1219 return NULL;
1220
1221 memcpy(state->config_space, dev->saved_config_space,
1222 sizeof(state->config_space));
1223
1224 cap = state->cap;
1225 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1226 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1227 memcpy(cap, &tmp->cap, len);
1228 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1229 }
1230 /* Empty cap_save terminates list */
1231
1232 return state;
1233 }
1234 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1235
1236 /**
1237 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1238 * @dev: PCI device that we're dealing with
1239 * @state: Saved state returned from pci_store_saved_state()
1240 */
1241 int pci_load_saved_state(struct pci_dev *dev,
1242 struct pci_saved_state *state)
1243 {
1244 struct pci_cap_saved_data *cap;
1245
1246 dev->state_saved = false;
1247
1248 if (!state)
1249 return 0;
1250
1251 memcpy(dev->saved_config_space, state->config_space,
1252 sizeof(state->config_space));
1253
1254 cap = state->cap;
1255 while (cap->size) {
1256 struct pci_cap_saved_state *tmp;
1257
1258 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1259 if (!tmp || tmp->cap.size != cap->size)
1260 return -EINVAL;
1261
1262 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1263 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1264 sizeof(struct pci_cap_saved_data) + cap->size);
1265 }
1266
1267 dev->state_saved = true;
1268 return 0;
1269 }
1270 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1271
1272 /**
1273 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1274 * and free the memory allocated for it.
1275 * @dev: PCI device that we're dealing with
1276 * @state: Pointer to saved state returned from pci_store_saved_state()
1277 */
1278 int pci_load_and_free_saved_state(struct pci_dev *dev,
1279 struct pci_saved_state **state)
1280 {
1281 int ret = pci_load_saved_state(dev, *state);
1282 kfree(*state);
1283 *state = NULL;
1284 return ret;
1285 }
1286 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1287
1288 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1289 {
1290 return pci_enable_resources(dev, bars);
1291 }
1292
1293 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1294 {
1295 int err;
1296 struct pci_dev *bridge;
1297 u16 cmd;
1298 u8 pin;
1299
1300 err = pci_set_power_state(dev, PCI_D0);
1301 if (err < 0 && err != -EIO)
1302 return err;
1303
1304 bridge = pci_upstream_bridge(dev);
1305 if (bridge)
1306 pcie_aspm_powersave_config_link(bridge);
1307
1308 err = pcibios_enable_device(dev, bars);
1309 if (err < 0)
1310 return err;
1311 pci_fixup_device(pci_fixup_enable, dev);
1312
1313 if (dev->msi_enabled || dev->msix_enabled)
1314 return 0;
1315
1316 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1317 if (pin) {
1318 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1319 if (cmd & PCI_COMMAND_INTX_DISABLE)
1320 pci_write_config_word(dev, PCI_COMMAND,
1321 cmd & ~PCI_COMMAND_INTX_DISABLE);
1322 }
1323
1324 return 0;
1325 }
1326
1327 /**
1328 * pci_reenable_device - Resume abandoned device
1329 * @dev: PCI device to be resumed
1330 *
1331 * Note this function is a backend of pci_default_resume and is not supposed
1332 * to be called by normal code, write proper resume handler and use it instead.
1333 */
1334 int pci_reenable_device(struct pci_dev *dev)
1335 {
1336 if (pci_is_enabled(dev))
1337 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1338 return 0;
1339 }
1340 EXPORT_SYMBOL(pci_reenable_device);
1341
1342 static void pci_enable_bridge(struct pci_dev *dev)
1343 {
1344 struct pci_dev *bridge;
1345 int retval;
1346
1347 bridge = pci_upstream_bridge(dev);
1348 if (bridge)
1349 pci_enable_bridge(bridge);
1350
1351 if (pci_is_enabled(dev)) {
1352 if (!dev->is_busmaster)
1353 pci_set_master(dev);
1354 return;
1355 }
1356
1357 retval = pci_enable_device(dev);
1358 if (retval)
1359 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1360 retval);
1361 pci_set_master(dev);
1362 }
1363
1364 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1365 {
1366 struct pci_dev *bridge;
1367 int err;
1368 int i, bars = 0;
1369
1370 /*
1371 * Power state could be unknown at this point, either due to a fresh
1372 * boot or a device removal call. So get the current power state
1373 * so that things like MSI message writing will behave as expected
1374 * (e.g. if the device really is in D0 at enable time).
1375 */
1376 if (dev->pm_cap) {
1377 u16 pmcsr;
1378 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1379 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1380 }
1381
1382 if (atomic_inc_return(&dev->enable_cnt) > 1)
1383 return 0; /* already enabled */
1384
1385 bridge = pci_upstream_bridge(dev);
1386 if (bridge)
1387 pci_enable_bridge(bridge);
1388
1389 /* only skip sriov related */
1390 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1391 if (dev->resource[i].flags & flags)
1392 bars |= (1 << i);
1393 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1394 if (dev->resource[i].flags & flags)
1395 bars |= (1 << i);
1396
1397 err = do_pci_enable_device(dev, bars);
1398 if (err < 0)
1399 atomic_dec(&dev->enable_cnt);
1400 return err;
1401 }
1402
1403 /**
1404 * pci_enable_device_io - Initialize a device for use with IO space
1405 * @dev: PCI device to be initialized
1406 *
1407 * Initialize device before it's used by a driver. Ask low-level code
1408 * to enable I/O resources. Wake up the device if it was suspended.
1409 * Beware, this function can fail.
1410 */
1411 int pci_enable_device_io(struct pci_dev *dev)
1412 {
1413 return pci_enable_device_flags(dev, IORESOURCE_IO);
1414 }
1415 EXPORT_SYMBOL(pci_enable_device_io);
1416
1417 /**
1418 * pci_enable_device_mem - Initialize a device for use with Memory space
1419 * @dev: PCI device to be initialized
1420 *
1421 * Initialize device before it's used by a driver. Ask low-level code
1422 * to enable Memory resources. Wake up the device if it was suspended.
1423 * Beware, this function can fail.
1424 */
1425 int pci_enable_device_mem(struct pci_dev *dev)
1426 {
1427 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1428 }
1429 EXPORT_SYMBOL(pci_enable_device_mem);
1430
1431 /**
1432 * pci_enable_device - Initialize device before it's used by a driver.
1433 * @dev: PCI device to be initialized
1434 *
1435 * Initialize device before it's used by a driver. Ask low-level code
1436 * to enable I/O and memory. Wake up the device if it was suspended.
1437 * Beware, this function can fail.
1438 *
1439 * Note we don't actually enable the device many times if we call
1440 * this function repeatedly (we just increment the count).
1441 */
1442 int pci_enable_device(struct pci_dev *dev)
1443 {
1444 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1445 }
1446 EXPORT_SYMBOL(pci_enable_device);
1447
1448 /*
1449 * Managed PCI resources. This manages device on/off, intx/msi/msix
1450 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1451 * there's no need to track it separately. pci_devres is initialized
1452 * when a device is enabled using managed PCI device enable interface.
1453 */
1454 struct pci_devres {
1455 unsigned int enabled:1;
1456 unsigned int pinned:1;
1457 unsigned int orig_intx:1;
1458 unsigned int restore_intx:1;
1459 u32 region_mask;
1460 };
1461
1462 static void pcim_release(struct device *gendev, void *res)
1463 {
1464 struct pci_dev *dev = to_pci_dev(gendev);
1465 struct pci_devres *this = res;
1466 int i;
1467
1468 if (dev->msi_enabled)
1469 pci_disable_msi(dev);
1470 if (dev->msix_enabled)
1471 pci_disable_msix(dev);
1472
1473 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1474 if (this->region_mask & (1 << i))
1475 pci_release_region(dev, i);
1476
1477 if (this->restore_intx)
1478 pci_intx(dev, this->orig_intx);
1479
1480 if (this->enabled && !this->pinned)
1481 pci_disable_device(dev);
1482 }
1483
1484 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1485 {
1486 struct pci_devres *dr, *new_dr;
1487
1488 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1489 if (dr)
1490 return dr;
1491
1492 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1493 if (!new_dr)
1494 return NULL;
1495 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1496 }
1497
1498 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1499 {
1500 if (pci_is_managed(pdev))
1501 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1502 return NULL;
1503 }
1504
1505 /**
1506 * pcim_enable_device - Managed pci_enable_device()
1507 * @pdev: PCI device to be initialized
1508 *
1509 * Managed pci_enable_device().
1510 */
1511 int pcim_enable_device(struct pci_dev *pdev)
1512 {
1513 struct pci_devres *dr;
1514 int rc;
1515
1516 dr = get_pci_dr(pdev);
1517 if (unlikely(!dr))
1518 return -ENOMEM;
1519 if (dr->enabled)
1520 return 0;
1521
1522 rc = pci_enable_device(pdev);
1523 if (!rc) {
1524 pdev->is_managed = 1;
1525 dr->enabled = 1;
1526 }
1527 return rc;
1528 }
1529 EXPORT_SYMBOL(pcim_enable_device);
1530
1531 /**
1532 * pcim_pin_device - Pin managed PCI device
1533 * @pdev: PCI device to pin
1534 *
1535 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1536 * driver detach. @pdev must have been enabled with
1537 * pcim_enable_device().
1538 */
1539 void pcim_pin_device(struct pci_dev *pdev)
1540 {
1541 struct pci_devres *dr;
1542
1543 dr = find_pci_dr(pdev);
1544 WARN_ON(!dr || !dr->enabled);
1545 if (dr)
1546 dr->pinned = 1;
1547 }
1548 EXPORT_SYMBOL(pcim_pin_device);
1549
1550 /*
1551 * pcibios_add_device - provide arch specific hooks when adding device dev
1552 * @dev: the PCI device being added
1553 *
1554 * Permits the platform to provide architecture specific functionality when
1555 * devices are added. This is the default implementation. Architecture
1556 * implementations can override this.
1557 */
1558 int __weak pcibios_add_device(struct pci_dev *dev)
1559 {
1560 return 0;
1561 }
1562
1563 /**
1564 * pcibios_release_device - provide arch specific hooks when releasing device dev
1565 * @dev: the PCI device being released
1566 *
1567 * Permits the platform to provide architecture specific functionality when
1568 * devices are released. This is the default implementation. Architecture
1569 * implementations can override this.
1570 */
1571 void __weak pcibios_release_device(struct pci_dev *dev) {}
1572
1573 /**
1574 * pcibios_disable_device - disable arch specific PCI resources for device dev
1575 * @dev: the PCI device to disable
1576 *
1577 * Disables architecture specific PCI resources for the device. This
1578 * is the default implementation. Architecture implementations can
1579 * override this.
1580 */
1581 void __weak pcibios_disable_device(struct pci_dev *dev) {}
1582
1583 /**
1584 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1585 * @irq: ISA IRQ to penalize
1586 * @active: IRQ active or not
1587 *
1588 * Permits the platform to provide architecture-specific functionality when
1589 * penalizing ISA IRQs. This is the default implementation. Architecture
1590 * implementations can override this.
1591 */
1592 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1593
1594 static void do_pci_disable_device(struct pci_dev *dev)
1595 {
1596 u16 pci_command;
1597
1598 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1599 if (pci_command & PCI_COMMAND_MASTER) {
1600 pci_command &= ~PCI_COMMAND_MASTER;
1601 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1602 }
1603
1604 pcibios_disable_device(dev);
1605 }
1606
1607 /**
1608 * pci_disable_enabled_device - Disable device without updating enable_cnt
1609 * @dev: PCI device to disable
1610 *
1611 * NOTE: This function is a backend of PCI power management routines and is
1612 * not supposed to be called drivers.
1613 */
1614 void pci_disable_enabled_device(struct pci_dev *dev)
1615 {
1616 if (pci_is_enabled(dev))
1617 do_pci_disable_device(dev);
1618 }
1619
1620 /**
1621 * pci_disable_device - Disable PCI device after use
1622 * @dev: PCI device to be disabled
1623 *
1624 * Signal to the system that the PCI device is not in use by the system
1625 * anymore. This only involves disabling PCI bus-mastering, if active.
1626 *
1627 * Note we don't actually disable the device until all callers of
1628 * pci_enable_device() have called pci_disable_device().
1629 */
1630 void pci_disable_device(struct pci_dev *dev)
1631 {
1632 struct pci_devres *dr;
1633
1634 dr = find_pci_dr(dev);
1635 if (dr)
1636 dr->enabled = 0;
1637
1638 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1639 "disabling already-disabled device");
1640
1641 if (atomic_dec_return(&dev->enable_cnt) != 0)
1642 return;
1643
1644 do_pci_disable_device(dev);
1645
1646 dev->is_busmaster = 0;
1647 }
1648 EXPORT_SYMBOL(pci_disable_device);
1649
1650 /**
1651 * pcibios_set_pcie_reset_state - set reset state for device dev
1652 * @dev: the PCIe device reset
1653 * @state: Reset state to enter into
1654 *
1655 *
1656 * Sets the PCIe reset state for the device. This is the default
1657 * implementation. Architecture implementations can override this.
1658 */
1659 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1660 enum pcie_reset_state state)
1661 {
1662 return -EINVAL;
1663 }
1664
1665 /**
1666 * pci_set_pcie_reset_state - set reset state for device dev
1667 * @dev: the PCIe device reset
1668 * @state: Reset state to enter into
1669 *
1670 *
1671 * Sets the PCI reset state for the device.
1672 */
1673 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1674 {
1675 return pcibios_set_pcie_reset_state(dev, state);
1676 }
1677 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1678
1679 /**
1680 * pci_check_pme_status - Check if given device has generated PME.
1681 * @dev: Device to check.
1682 *
1683 * Check the PME status of the device and if set, clear it and clear PME enable
1684 * (if set). Return 'true' if PME status and PME enable were both set or
1685 * 'false' otherwise.
1686 */
1687 bool pci_check_pme_status(struct pci_dev *dev)
1688 {
1689 int pmcsr_pos;
1690 u16 pmcsr;
1691 bool ret = false;
1692
1693 if (!dev->pm_cap)
1694 return false;
1695
1696 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1697 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1698 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1699 return false;
1700
1701 /* Clear PME status. */
1702 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1703 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1704 /* Disable PME to avoid interrupt flood. */
1705 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1706 ret = true;
1707 }
1708
1709 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1710
1711 return ret;
1712 }
1713
1714 /**
1715 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1716 * @dev: Device to handle.
1717 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1718 *
1719 * Check if @dev has generated PME and queue a resume request for it in that
1720 * case.
1721 */
1722 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1723 {
1724 if (pme_poll_reset && dev->pme_poll)
1725 dev->pme_poll = false;
1726
1727 if (pci_check_pme_status(dev)) {
1728 pci_wakeup_event(dev);
1729 pm_request_resume(&dev->dev);
1730 }
1731 return 0;
1732 }
1733
1734 /**
1735 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1736 * @bus: Top bus of the subtree to walk.
1737 */
1738 void pci_pme_wakeup_bus(struct pci_bus *bus)
1739 {
1740 if (bus)
1741 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1742 }
1743
1744
1745 /**
1746 * pci_pme_capable - check the capability of PCI device to generate PME#
1747 * @dev: PCI device to handle.
1748 * @state: PCI state from which device will issue PME#.
1749 */
1750 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1751 {
1752 if (!dev->pm_cap)
1753 return false;
1754
1755 return !!(dev->pme_support & (1 << state));
1756 }
1757 EXPORT_SYMBOL(pci_pme_capable);
1758
1759 static void pci_pme_list_scan(struct work_struct *work)
1760 {
1761 struct pci_pme_device *pme_dev, *n;
1762
1763 mutex_lock(&pci_pme_list_mutex);
1764 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1765 if (pme_dev->dev->pme_poll) {
1766 struct pci_dev *bridge;
1767
1768 bridge = pme_dev->dev->bus->self;
1769 /*
1770 * If bridge is in low power state, the
1771 * configuration space of subordinate devices
1772 * may be not accessible
1773 */
1774 if (bridge && bridge->current_state != PCI_D0)
1775 continue;
1776 pci_pme_wakeup(pme_dev->dev, NULL);
1777 } else {
1778 list_del(&pme_dev->list);
1779 kfree(pme_dev);
1780 }
1781 }
1782 if (!list_empty(&pci_pme_list))
1783 queue_delayed_work(system_freezable_wq, &pci_pme_work,
1784 msecs_to_jiffies(PME_TIMEOUT));
1785 mutex_unlock(&pci_pme_list_mutex);
1786 }
1787
1788 static void __pci_pme_active(struct pci_dev *dev, bool enable)
1789 {
1790 u16 pmcsr;
1791
1792 if (!dev->pme_support)
1793 return;
1794
1795 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1796 /* Clear PME_Status by writing 1 to it and enable PME# */
1797 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1798 if (!enable)
1799 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1800
1801 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1802 }
1803
1804 /**
1805 * pci_pme_restore - Restore PME configuration after config space restore.
1806 * @dev: PCI device to update.
1807 */
1808 void pci_pme_restore(struct pci_dev *dev)
1809 {
1810 u16 pmcsr;
1811
1812 if (!dev->pme_support)
1813 return;
1814
1815 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1816 if (dev->wakeup_prepared) {
1817 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1818 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
1819 } else {
1820 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1821 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1822 }
1823 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1824 }
1825
1826 /**
1827 * pci_pme_active - enable or disable PCI device's PME# function
1828 * @dev: PCI device to handle.
1829 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1830 *
1831 * The caller must verify that the device is capable of generating PME# before
1832 * calling this function with @enable equal to 'true'.
1833 */
1834 void pci_pme_active(struct pci_dev *dev, bool enable)
1835 {
1836 __pci_pme_active(dev, enable);
1837
1838 /*
1839 * PCI (as opposed to PCIe) PME requires that the device have
1840 * its PME# line hooked up correctly. Not all hardware vendors
1841 * do this, so the PME never gets delivered and the device
1842 * remains asleep. The easiest way around this is to
1843 * periodically walk the list of suspended devices and check
1844 * whether any have their PME flag set. The assumption is that
1845 * we'll wake up often enough anyway that this won't be a huge
1846 * hit, and the power savings from the devices will still be a
1847 * win.
1848 *
1849 * Although PCIe uses in-band PME message instead of PME# line
1850 * to report PME, PME does not work for some PCIe devices in
1851 * reality. For example, there are devices that set their PME
1852 * status bits, but don't really bother to send a PME message;
1853 * there are PCI Express Root Ports that don't bother to
1854 * trigger interrupts when they receive PME messages from the
1855 * devices below. So PME poll is used for PCIe devices too.
1856 */
1857
1858 if (dev->pme_poll) {
1859 struct pci_pme_device *pme_dev;
1860 if (enable) {
1861 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1862 GFP_KERNEL);
1863 if (!pme_dev) {
1864 dev_warn(&dev->dev, "can't enable PME#\n");
1865 return;
1866 }
1867 pme_dev->dev = dev;
1868 mutex_lock(&pci_pme_list_mutex);
1869 list_add(&pme_dev->list, &pci_pme_list);
1870 if (list_is_singular(&pci_pme_list))
1871 queue_delayed_work(system_freezable_wq,
1872 &pci_pme_work,
1873 msecs_to_jiffies(PME_TIMEOUT));
1874 mutex_unlock(&pci_pme_list_mutex);
1875 } else {
1876 mutex_lock(&pci_pme_list_mutex);
1877 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1878 if (pme_dev->dev == dev) {
1879 list_del(&pme_dev->list);
1880 kfree(pme_dev);
1881 break;
1882 }
1883 }
1884 mutex_unlock(&pci_pme_list_mutex);
1885 }
1886 }
1887
1888 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1889 }
1890 EXPORT_SYMBOL(pci_pme_active);
1891
1892 /**
1893 * pci_enable_wake - enable PCI device as wakeup event source
1894 * @dev: PCI device affected
1895 * @state: PCI state from which device will issue wakeup events
1896 * @enable: True to enable event generation; false to disable
1897 *
1898 * This enables the device as a wakeup event source, or disables it.
1899 * When such events involve platform-specific hooks, those hooks are
1900 * called automatically by this routine.
1901 *
1902 * Devices with legacy power management (no standard PCI PM capabilities)
1903 * always require such platform hooks.
1904 *
1905 * RETURN VALUE:
1906 * 0 is returned on success.
1907 * -EINVAL is returned if the device is not supposed to wake up the system.
1908 * An error code depending on the platform is returned if both the platform
1909 * and the native mechanism fail to enable the generation of wake-up events.
1910 */
1911 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1912 {
1913 int ret = 0;
1914
1915 /* Don't do the same thing twice in a row for one device. */
1916 if (!!enable == !!dev->wakeup_prepared)
1917 return 0;
1918
1919 /*
1920 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1921 * Anderson we should be doing PME# wake enable followed by ACPI wake
1922 * enable. To disable wake-up we call the platform first, for symmetry.
1923 */
1924
1925 if (enable) {
1926 int error;
1927
1928 if (pci_pme_capable(dev, state))
1929 pci_pme_active(dev, true);
1930 else
1931 ret = 1;
1932 error = platform_pci_set_wakeup(dev, true);
1933 if (ret)
1934 ret = error;
1935 if (!ret)
1936 dev->wakeup_prepared = true;
1937 } else {
1938 platform_pci_set_wakeup(dev, false);
1939 pci_pme_active(dev, false);
1940 dev->wakeup_prepared = false;
1941 }
1942
1943 return ret;
1944 }
1945 EXPORT_SYMBOL(pci_enable_wake);
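
/*
 * Usage sketch (illustrative only, not part of this file): a legacy
 * suspend hook might combine pci_enable_wake() with a power state
 * change roughly as below; "foo_suspend" is hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */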
1946
1947 /**
1948 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1949 * @dev: PCI device to prepare
1950 * @enable: True to enable wake-up event generation; false to disable
1951 *
1952 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1953 * and this function allows them to set that up cleanly - pci_enable_wake()
1954 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1955 * ordering constraints.
1956 *
1957 * This function only returns error code if the device is not capable of
1958 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1959 * enable wake-up power for it.
1960 */
1961 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1962 {
1963 return pci_pme_capable(dev, PCI_D3cold) ?
1964 pci_enable_wake(dev, PCI_D3cold, enable) :
1965 pci_enable_wake(dev, PCI_D3hot, enable);
1966 }
1967 EXPORT_SYMBOL(pci_wake_from_d3);
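
/*
 * Usage sketch (illustrative only): network drivers often call this from
 * their Wake-on-LAN setup path; "foo_set_wol" is hypothetical.
 *
 *	static void foo_set_wol(struct pci_dev *pdev, bool enable)
 *	{
 *		device_set_wakeup_enable(&pdev->dev, enable);
 *		pci_wake_from_d3(pdev, enable);
 *	}
 */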
1968
1969 /**
1970 * pci_target_state - find an appropriate low power state for a given PCI dev
1971 * @dev: PCI device
1972 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
1973 *
1974 * Use underlying platform code to find a supported low power state for @dev.
1975 * If the platform can't manage @dev, return the deepest state from which it
1976 * can generate wake events, based on any available PME info.
1977 */
1978 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
1979 {
1980 pci_power_t target_state = PCI_D3hot;
1981
1982 if (platform_pci_power_manageable(dev)) {
1983 /*
1984 * Call the platform to choose the target state of the device
1985 * and enable wake-up from this state if supported.
1986 */
1987 pci_power_t state = platform_pci_choose_state(dev);
1988
1989 switch (state) {
1990 case PCI_POWER_ERROR:
1991 case PCI_UNKNOWN:
1992 break;
1993 case PCI_D1:
1994 case PCI_D2:
1995 if (pci_no_d1d2(dev))
1996 break;
1997 default:
1998 target_state = state;
1999 }
2000
2001 return target_state;
2002 }
2003
2004 if (!dev->pm_cap)
2005 target_state = PCI_D0;
2006
2007 /*
2008 * If the device is in D3cold even though it's not power-manageable by
2009 * the platform, it may have been powered down by non-standard means.
2010 * Best to let it slumber.
2011 */
2012 if (dev->current_state == PCI_D3cold)
2013 target_state = PCI_D3cold;
2014
2015 if (wakeup) {
2016 /*
2017 * Find the deepest state from which the device can generate
2018 * wake-up events, make it the target state and enable device
2019 * to generate PME#.
2020 */
2021 if (dev->pme_support) {
2022 while (target_state
2023 && !(dev->pme_support & (1 << target_state)))
2024 target_state--;
2025 }
2026 }
2027
2028 return target_state;
2029 }
2030
2031 /**
2032 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2033 * @dev: Device to handle.
2034 *
2035 * Choose the power state appropriate for the device depending on whether
2036 * it can wake up the system and/or is power manageable by the platform
2037 * (PCI_D3hot is the default) and put the device into that state.
2038 */
2039 int pci_prepare_to_sleep(struct pci_dev *dev)
2040 {
2041 bool wakeup = device_may_wakeup(&dev->dev);
2042 pci_power_t target_state = pci_target_state(dev, wakeup);
2043 int error;
2044
2045 if (target_state == PCI_POWER_ERROR)
2046 return -EIO;
2047
2048 pci_enable_wake(dev, target_state, wakeup);
2049
2050 error = pci_set_power_state(dev, target_state);
2051
2052 if (error)
2053 pci_enable_wake(dev, target_state, false);
2054
2055 return error;
2056 }
2057 EXPORT_SYMBOL(pci_prepare_to_sleep);
2058
2059 /**
2060 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
2061 * @dev: Device to handle.
2062 *
2063 * Disable device's system wake-up capability and put it into D0.
2064 */
2065 int pci_back_from_sleep(struct pci_dev *dev)
2066 {
2067 pci_enable_wake(dev, PCI_D0, false);
2068 return pci_set_power_state(dev, PCI_D0);
2069 }
2070 EXPORT_SYMBOL(pci_back_from_sleep);
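
/*
 * Usage sketch (illustrative only): pci_prepare_to_sleep() and
 * pci_back_from_sleep() are intended as a pair in legacy suspend/resume
 * hooks; the "foo" names are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */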
2071
2072 /**
2073 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2074 * @dev: PCI device being suspended.
2075 *
2076 * Prepare @dev to generate wake-up events at run time and put it into a low
2077 * power state.
2078 */
2079 int pci_finish_runtime_suspend(struct pci_dev *dev)
2080 {
2081 pci_power_t target_state;
2082 int error;
2083
2084 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2085 if (target_state == PCI_POWER_ERROR)
2086 return -EIO;
2087
2088 dev->runtime_d3cold = target_state == PCI_D3cold;
2089
2090 pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2091
2092 error = pci_set_power_state(dev, target_state);
2093
2094 if (error) {
2095 pci_enable_wake(dev, target_state, false);
2096 dev->runtime_d3cold = false;
2097 }
2098
2099 return error;
2100 }
2101
2102 /**
2103 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2104 * @dev: Device to check.
2105 *
2106 * Return true if the device itself is capable of generating wake-up events
2107 * (through the platform or using the native PCIe PME) or if the device supports
2108 * PME and one of its upstream bridges can generate wake-up events.
2109 */
2110 bool pci_dev_run_wake(struct pci_dev *dev)
2111 {
2112 struct pci_bus *bus = dev->bus;
2113
2114 if (!dev->pme_support)
2115 return false;
2116
2117 /* PME-capable in principle, but not from the target power state */
2118 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2119 return false;
2120
2121 if (device_can_wakeup(&dev->dev))
2122 return true;
2123
2124 while (bus->parent) {
2125 struct pci_dev *bridge = bus->self;
2126
2127 if (device_can_wakeup(&bridge->dev))
2128 return true;
2129
2130 bus = bus->parent;
2131 }
2132
2133 /* We have reached the root bus. */
2134 if (bus->bridge)
2135 return device_can_wakeup(bus->bridge);
2136
2137 return false;
2138 }
2139 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2140
2141 /**
2142 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2143 * @pci_dev: Device to check.
2144 *
2145 * Return 'true' if the device is runtime-suspended, it does not have to be
2146 * reconfigured due to a difference between the system and runtime wakeup
2147 * settings, and its current power state is suitable for the upcoming
2148 * (system) transition.
2149 *
2150 * If the device is not configured for system wakeup, disable PME for it before
2151 * returning 'true' to prevent it from waking up the system unnecessarily.
2152 */
2153 bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2154 {
2155 struct device *dev = &pci_dev->dev;
2156 bool wakeup = device_may_wakeup(dev);
2157
2158 if (!pm_runtime_suspended(dev)
2159 || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
2160 || platform_pci_need_resume(pci_dev)
2161 || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2162 return false;
2163
2164 /*
2165 * At this point the device is good to go unless it's been configured
2166 * to generate PME at the runtime suspend time, but it is not supposed
2167 * to wake up the system. In that case, simply disable PME for it
2168 * (it will have to be re-enabled on exit from system resume).
2169 *
2170 * If the device's power state is D3cold and the platform check above
2171 * hasn't triggered, the device's configuration is suitable and we don't
2172 * need to manipulate it at all.
2173 */
2174 spin_lock_irq(&dev->power.lock);
2175
2176 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2177 !wakeup)
2178 __pci_pme_active(pci_dev, false);
2179
2180 spin_unlock_irq(&dev->power.lock);
2181 return true;
2182 }
2183
2184 /**
2185 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2186 * @pci_dev: Device to handle.
2187 *
2188 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2189 * it might have been disabled during the prepare phase of system suspend if
2190 * the device was not configured for system wakeup.
2191 */
2192 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2193 {
2194 struct device *dev = &pci_dev->dev;
2195
2196 if (!pci_dev_run_wake(pci_dev))
2197 return;
2198
2199 spin_lock_irq(&dev->power.lock);
2200
2201 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2202 __pci_pme_active(pci_dev, true);
2203
2204 spin_unlock_irq(&dev->power.lock);
2205 }
2206
2207 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2208 {
2209 struct device *dev = &pdev->dev;
2210 struct device *parent = dev->parent;
2211
2212 if (parent)
2213 pm_runtime_get_sync(parent);
2214 pm_runtime_get_noresume(dev);
2215 /*
2216 * pdev->current_state is set to PCI_D3cold during suspending,
2217 * so wait until suspending completes
2218 */
2219 pm_runtime_barrier(dev);
2220 /*
2221 * Only need to resume devices in D3cold, because config
2222 * registers are still accessible for devices suspended but
2223 * not in D3cold.
2224 */
2225 if (pdev->current_state == PCI_D3cold)
2226 pm_runtime_resume(dev);
2227 }
2228
2229 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2230 {
2231 struct device *dev = &pdev->dev;
2232 struct device *parent = dev->parent;
2233
2234 pm_runtime_put(dev);
2235 if (parent)
2236 pm_runtime_put_sync(parent);
2237 }
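
/*
 * Usage sketch (illustrative only): a config space accessor that may run
 * while the device is runtime suspended brackets the access with these
 * helpers; "val" is a hypothetical u32 destination.
 *
 *	pci_config_pm_runtime_get(pdev);
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	pci_config_pm_runtime_put(pdev);
 */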
2238
2239 /**
2240 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2241 * @bridge: Bridge to check
2242 *
2243 * This function checks if it is possible to move the bridge to D3.
2244 * Currently we only allow D3 for recent enough PCIe ports.
2245 */
2246 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2247 {
2248 unsigned int year;
2249
2250 if (!pci_is_pcie(bridge))
2251 return false;
2252
2253 switch (pci_pcie_type(bridge)) {
2254 case PCI_EXP_TYPE_ROOT_PORT:
2255 case PCI_EXP_TYPE_UPSTREAM:
2256 case PCI_EXP_TYPE_DOWNSTREAM:
2257 if (pci_bridge_d3_disable)
2258 return false;
2259
2260 /*
2261 * Hotplug interrupts cannot be delivered if the link is down,
2262 * so parents of a hotplug port must stay awake. In addition,
2263 * hotplug ports handled by firmware in System Management Mode
2264 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2265 * For simplicity, disallow in general for now.
2266 */
2267 if (bridge->is_hotplug_bridge)
2268 return false;
2269
2270 if (pci_bridge_d3_force)
2271 return true;
2272
2273 /*
2274 * It should be safe to put PCIe ports from 2015 or newer
2275 * to D3.
2276 */
2277 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2278 year >= 2015) {
2279 return true;
2280 }
2281 break;
2282 }
2283
2284 return false;
2285 }
2286
2287 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2288 {
2289 bool *d3cold_ok = data;
2290
2291 if (/* The device needs to be allowed to go D3cold ... */
2292 dev->no_d3cold || !dev->d3cold_allowed ||
2293
2294 /* ... and if it is wakeup capable to do so from D3cold. */
2295 (device_may_wakeup(&dev->dev) &&
2296 !pci_pme_capable(dev, PCI_D3cold)) ||
2297
2298 /* If it is a bridge it must be allowed to go to D3. */
2299 !pci_power_manageable(dev))
2300
2301 *d3cold_ok = false;
2302
2303 return !*d3cold_ok;
2304 }
2305
2306 /**
2307 * pci_bridge_d3_update - Update bridge D3 capabilities
2308 * @dev: PCI device which is changed
2309 *
2310 * Update the PM capabilities of upstream bridges depending on whether the
2311 * device's PM configuration has changed or the device is being removed. The
2312 * change is also propagated upstream.
2313 */
2314 void pci_bridge_d3_update(struct pci_dev *dev)
2315 {
2316 bool remove = !device_is_registered(&dev->dev);
2317 struct pci_dev *bridge;
2318 bool d3cold_ok = true;
2319
2320 bridge = pci_upstream_bridge(dev);
2321 if (!bridge || !pci_bridge_d3_possible(bridge))
2322 return;
2323
2324 /*
2325 * If D3 is currently allowed for the bridge, removing one of its
2326 * children won't change that.
2327 */
2328 if (remove && bridge->bridge_d3)
2329 return;
2330
2331 /*
2332 * If D3 is currently allowed for the bridge and a child is added or
2333 * changed, disallowance of D3 can only be caused by that child, so
2334 * we only need to check that single device, not any of its siblings.
2335 *
2336 * If D3 is currently not allowed for the bridge, checking the device
2337 * first may allow us to skip checking its siblings.
2338 */
2339 if (!remove)
2340 pci_dev_check_d3cold(dev, &d3cold_ok);
2341
2342 /*
2343 * If D3 is currently not allowed for the bridge, this may be caused
2344 * either by the device being changed/removed or any of its siblings,
2345 * so we need to go through all children to find out if one of them
2346 * continues to block D3.
2347 */
2348 if (d3cold_ok && !bridge->bridge_d3)
2349 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2350 &d3cold_ok);
2351
2352 if (bridge->bridge_d3 != d3cold_ok) {
2353 bridge->bridge_d3 = d3cold_ok;
2354 /* Propagate change to upstream bridges */
2355 pci_bridge_d3_update(bridge);
2356 }
2357 }
2358
2359 /**
2360 * pci_d3cold_enable - Enable D3cold for device
2361 * @dev: PCI device to handle
2362 *
2363 * This function can be used in drivers to enable D3cold from the device
2364 * they handle. It also updates upstream PCI bridge PM capabilities
2365 * accordingly.
2366 */
2367 void pci_d3cold_enable(struct pci_dev *dev)
2368 {
2369 if (dev->no_d3cold) {
2370 dev->no_d3cold = false;
2371 pci_bridge_d3_update(dev);
2372 }
2373 }
2374 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2375
2376 /**
2377 * pci_d3cold_disable - Disable D3cold for device
2378 * @dev: PCI device to handle
2379 *
2380 * This function can be used in drivers to disable D3cold from the device
2381 * they handle. It also updates upstream PCI bridge PM capabilities
2382 * accordingly.
2383 */
2384 void pci_d3cold_disable(struct pci_dev *dev)
2385 {
2386 if (!dev->no_d3cold) {
2387 dev->no_d3cold = true;
2388 pci_bridge_d3_update(dev);
2389 }
2390 }
2391 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
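
/*
 * Usage sketch (illustrative only): a driver whose hardware loses state
 * it cannot restore across D3cold can opt out at probe time; "foo_probe"
 * is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		pci_d3cold_disable(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */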
2392
2393 /**
2394 * pci_pm_init - Initialize PM functions of given PCI device
2395 * @dev: PCI device to handle.
2396 */
2397 void pci_pm_init(struct pci_dev *dev)
2398 {
2399 int pm;
2400 u16 pmc;
2401
2402 pm_runtime_forbid(&dev->dev);
2403 pm_runtime_set_active(&dev->dev);
2404 pm_runtime_enable(&dev->dev);
2405 device_enable_async_suspend(&dev->dev);
2406 dev->wakeup_prepared = false;
2407
2408 dev->pm_cap = 0;
2409 dev->pme_support = 0;
2410
2411 /* find PCI PM capability in list */
2412 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2413 if (!pm)
2414 return;
2415 /* Check device's ability to generate PME# */
2416 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2417
2418 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2419 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2420 pmc & PCI_PM_CAP_VER_MASK);
2421 return;
2422 }
2423
2424 dev->pm_cap = pm;
2425 dev->d3_delay = PCI_PM_D3_WAIT;
2426 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2427 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2428 dev->d3cold_allowed = true;
2429
2430 dev->d1_support = false;
2431 dev->d2_support = false;
2432 if (!pci_no_d1d2(dev)) {
2433 if (pmc & PCI_PM_CAP_D1)
2434 dev->d1_support = true;
2435 if (pmc & PCI_PM_CAP_D2)
2436 dev->d2_support = true;
2437
2438 if (dev->d1_support || dev->d2_support)
2439 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2440 dev->d1_support ? " D1" : "",
2441 dev->d2_support ? " D2" : "");
2442 }
2443
2444 pmc &= PCI_PM_CAP_PME_MASK;
2445 if (pmc) {
2446 dev_printk(KERN_DEBUG, &dev->dev,
2447 "PME# supported from%s%s%s%s%s\n",
2448 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2449 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2450 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2451 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2452 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2453 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2454 dev->pme_poll = true;
2455 /*
2456 * Make device's PM flags reflect the wake-up capability, but
2457 * let user space enable it to wake up the system as needed.
2458 */
2459 device_set_wakeup_capable(&dev->dev, true);
2460 /* Disable the PME# generation functionality */
2461 pci_pme_active(dev, false);
2462 }
2463 }
2464
2465 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2466 {
2467 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2468
2469 switch (prop) {
2470 case PCI_EA_P_MEM:
2471 case PCI_EA_P_VF_MEM:
2472 flags |= IORESOURCE_MEM;
2473 break;
2474 case PCI_EA_P_MEM_PREFETCH:
2475 case PCI_EA_P_VF_MEM_PREFETCH:
2476 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2477 break;
2478 case PCI_EA_P_IO:
2479 flags |= IORESOURCE_IO;
2480 break;
2481 default:
2482 return 0;
2483 }
2484
2485 return flags;
2486 }
2487
2488 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2489 u8 prop)
2490 {
2491 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2492 return &dev->resource[bei];
2493 #ifdef CONFIG_PCI_IOV
2494 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2495 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2496 return &dev->resource[PCI_IOV_RESOURCES +
2497 bei - PCI_EA_BEI_VF_BAR0];
2498 #endif
2499 else if (bei == PCI_EA_BEI_ROM)
2500 return &dev->resource[PCI_ROM_RESOURCE];
2501 else
2502 return NULL;
2503 }
2504
2505 /* Read an Enhanced Allocation (EA) entry */
2506 static int pci_ea_read(struct pci_dev *dev, int offset)
2507 {
2508 struct resource *res;
2509 int ent_size, ent_offset = offset;
2510 resource_size_t start, end;
2511 unsigned long flags;
2512 u32 dw0, bei, base, max_offset;
2513 u8 prop;
2514 bool support_64 = (sizeof(resource_size_t) >= 8);
2515
2516 pci_read_config_dword(dev, ent_offset, &dw0);
2517 ent_offset += 4;
2518
2519 /* Entry size field indicates DWORDs after 1st */
2520 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2521
2522 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2523 goto out;
2524
2525 bei = (dw0 & PCI_EA_BEI) >> 4;
2526 prop = (dw0 & PCI_EA_PP) >> 8;
2527
2528 /*
2529 * If the Property is in the reserved range, try the Secondary
2530 * Property instead.
2531 */
2532 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2533 prop = (dw0 & PCI_EA_SP) >> 16;
2534 if (prop > PCI_EA_P_BRIDGE_IO)
2535 goto out;
2536
2537 res = pci_ea_get_resource(dev, bei, prop);
2538 if (!res) {
2539 dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
2540 goto out;
2541 }
2542
2543 flags = pci_ea_flags(dev, prop);
2544 if (!flags) {
2545 dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
2546 goto out;
2547 }
2548
2549 /* Read Base */
2550 pci_read_config_dword(dev, ent_offset, &base);
2551 start = (base & PCI_EA_FIELD_MASK);
2552 ent_offset += 4;
2553
2554 /* Read MaxOffset */
2555 pci_read_config_dword(dev, ent_offset, &max_offset);
2556 ent_offset += 4;
2557
2558 /* Read Base MSBs (if 64-bit entry) */
2559 if (base & PCI_EA_IS_64) {
2560 u32 base_upper;
2561
2562 pci_read_config_dword(dev, ent_offset, &base_upper);
2563 ent_offset += 4;
2564
2565 flags |= IORESOURCE_MEM_64;
2566
2567 /* entry starts above 32-bit boundary, can't use */
2568 if (!support_64 && base_upper)
2569 goto out;
2570
2571 if (support_64)
2572 start |= ((u64)base_upper << 32);
2573 }
2574
2575 end = start + (max_offset | 0x03);
2576
2577 /* Read MaxOffset MSBs (if 64-bit entry) */
2578 if (max_offset & PCI_EA_IS_64) {
2579 u32 max_offset_upper;
2580
2581 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2582 ent_offset += 4;
2583
2584 flags |= IORESOURCE_MEM_64;
2585
2586 /* entry too big, can't use */
2587 if (!support_64 && max_offset_upper)
2588 goto out;
2589
2590 if (support_64)
2591 end += ((u64)max_offset_upper << 32);
2592 }
2593
2594 if (end < start) {
2595 dev_err(&dev->dev, "EA Entry crosses address boundary\n");
2596 goto out;
2597 }
2598
2599 if (ent_size != ent_offset - offset) {
2600 dev_err(&dev->dev,
2601 "EA Entry Size (%d) does not match length read (%d)\n",
2602 ent_size, ent_offset - offset);
2603 goto out;
2604 }
2605
2606 res->name = pci_name(dev);
2607 res->start = start;
2608 res->end = end;
2609 res->flags = flags;
2610
2611 if (bei <= PCI_EA_BEI_BAR5)
2612 dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2613 bei, res, prop);
2614 else if (bei == PCI_EA_BEI_ROM)
2615 dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2616 res, prop);
2617 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2618 dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2619 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2620 else
2621 dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2622 bei, res, prop);
2623
2624 out:
2625 return offset + ent_size;
2626 }
2627
2628 /* Enhanced Allocation Initialization */
2629 void pci_ea_init(struct pci_dev *dev)
2630 {
2631 int ea;
2632 u8 num_ent;
2633 int offset;
2634 int i;
2635
2636 /* find PCI EA capability in list */
2637 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2638 if (!ea)
2639 return;
2640
2641 /* determine the number of entries */
2642 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2643 &num_ent);
2644 num_ent &= PCI_EA_NUM_ENT_MASK;
2645
2646 offset = ea + PCI_EA_FIRST_ENT;
2647
2648 /* Skip DWORD 2 for type 1 functions */
2649 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2650 offset += 4;
2651
2652 /* parse each EA entry */
2653 for (i = 0; i < num_ent; ++i)
2654 offset = pci_ea_read(dev, offset);
2655 }
2656
2657 static void pci_add_saved_cap(struct pci_dev *pci_dev,
2658 struct pci_cap_saved_state *new_cap)
2659 {
2660 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2661 }
2662
2663 /**
2664 * _pci_add_cap_save_buffer - allocate buffer for saving given
2665 * capability registers
2666 * @dev: the PCI device
2667 * @cap: the capability to allocate the buffer for
2668 * @extended: Standard or Extended capability ID
2669 * @size: requested size of the buffer
2670 */
2671 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2672 bool extended, unsigned int size)
2673 {
2674 int pos;
2675 struct pci_cap_saved_state *save_state;
2676
2677 if (extended)
2678 pos = pci_find_ext_capability(dev, cap);
2679 else
2680 pos = pci_find_capability(dev, cap);
2681
2682 if (!pos)
2683 return 0;
2684
2685 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2686 if (!save_state)
2687 return -ENOMEM;
2688
2689 save_state->cap.cap_nr = cap;
2690 save_state->cap.cap_extended = extended;
2691 save_state->cap.size = size;
2692 pci_add_saved_cap(dev, save_state);
2693
2694 return 0;
2695 }
2696
2697 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2698 {
2699 return _pci_add_cap_save_buffer(dev, cap, false, size);
2700 }
2701
2702 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2703 {
2704 return _pci_add_cap_save_buffer(dev, cap, true, size);
2705 }
2706
2707 /**
2708 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2709 * @dev: the PCI device
2710 */
2711 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2712 {
2713 int error;
2714
2715 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2716 PCI_EXP_SAVE_REGS * sizeof(u16));
2717 if (error)
2718 dev_err(&dev->dev,
2719 "unable to preallocate PCI Express save buffer\n");
2720
2721 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2722 if (error)
2723 dev_err(&dev->dev,
2724 "unable to preallocate PCI-X save buffer\n");
2725
2726 pci_allocate_vc_save_buffers(dev);
2727 }
2728
2729 void pci_free_cap_save_buffers(struct pci_dev *dev)
2730 {
2731 struct pci_cap_saved_state *tmp;
2732 struct hlist_node *n;
2733
2734 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2735 kfree(tmp);
2736 }
2737
2738 /**
2739 * pci_configure_ari - enable or disable ARI forwarding
2740 * @dev: the PCI device
2741 *
2742 * If @dev and its upstream bridge both support ARI, enable ARI in the
2743 * bridge. Otherwise, disable ARI in the bridge.
2744 */
2745 void pci_configure_ari(struct pci_dev *dev)
2746 {
2747 u32 cap;
2748 struct pci_dev *bridge;
2749
2750 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2751 return;
2752
2753 bridge = dev->bus->self;
2754 if (!bridge)
2755 return;
2756
2757 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2758 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2759 return;
2760
2761 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2762 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2763 PCI_EXP_DEVCTL2_ARI);
2764 bridge->ari_enabled = 1;
2765 } else {
2766 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2767 PCI_EXP_DEVCTL2_ARI);
2768 bridge->ari_enabled = 0;
2769 }
2770 }
2771
2772 static int pci_acs_enable;
2773
2774 /**
2775 * pci_request_acs - ask for ACS to be enabled if supported
2776 */
2777 void pci_request_acs(void)
2778 {
2779 pci_acs_enable = 1;
2780 }
2781
2782 /**
2783 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2784 * @dev: the PCI device
2785 */
2786 static void pci_std_enable_acs(struct pci_dev *dev)
2787 {
2788 int pos;
2789 u16 cap;
2790 u16 ctrl;
2791
2792 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2793 if (!pos)
2794 return;
2795
2796 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2797 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2798
2799 /* Source Validation */
2800 ctrl |= (cap & PCI_ACS_SV);
2801
2802 /* P2P Request Redirect */
2803 ctrl |= (cap & PCI_ACS_RR);
2804
2805 /* P2P Completion Redirect */
2806 ctrl |= (cap & PCI_ACS_CR);
2807
2808 /* Upstream Forwarding */
2809 ctrl |= (cap & PCI_ACS_UF);
2810
2811 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2812 }
2813
2814 /**
2815 * pci_enable_acs - enable ACS if the hardware supports it
2816 * @dev: the PCI device
2817 */
2818 void pci_enable_acs(struct pci_dev *dev)
2819 {
2820 if (!pci_acs_enable)
2821 return;
2822
2823 if (!pci_dev_specific_enable_acs(dev))
2824 return;
2825
2826 pci_std_enable_acs(dev);
2827 }
2828
2829 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2830 {
2831 int pos;
2832 u16 cap, ctrl;
2833
2834 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2835 if (!pos)
2836 return false;
2837
2838 /*
2839 * Except for egress control, capabilities are either required
2840 * or only required if controllable. Features missing from the
2841 * capability field can therefore be assumed as hard-wired enabled.
2842 */
2843 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2844 acs_flags &= (cap | PCI_ACS_EC);
2845
2846 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2847 return (ctrl & acs_flags) == acs_flags;
2848 }
2849
2850 /**
2851 * pci_acs_enabled - test ACS against required flags for a given device
2852 * @pdev: device to test
2853 * @acs_flags: required PCI ACS flags
2854 *
2855 * Return true if the device supports the provided flags. Automatically
2856 * filters out flags that are not implemented on multifunction devices.
2857 *
2858 * Note that this interface checks the effective ACS capabilities of the
2859 * device rather than the actual capabilities. For instance, most single
2860 * function endpoints are not required to support ACS because they have no
2861 * opportunity for peer-to-peer access. We therefore return 'true'
2862 * regardless of whether the device exposes an ACS capability. This makes
2863 * it much easier for callers of this function to ignore the actual type
2864 * or topology of the device when testing ACS support.
2865 */
2866 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2867 {
2868 int ret;
2869
2870 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2871 if (ret >= 0)
2872 return ret > 0;
2873
2874 /*
2875 * Conventional PCI and PCI-X devices never support ACS, either
2876 * effectively or actually. The shared bus topology implies that
2877 * any device on the bus can receive or snoop DMA.
2878 */
2879 if (!pci_is_pcie(pdev))
2880 return false;
2881
2882 switch (pci_pcie_type(pdev)) {
2883 /*
2884 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2885 * but since their primary interface is PCI/X, we conservatively
2886 * handle them as we would a non-PCIe device.
2887 */
2888 case PCI_EXP_TYPE_PCIE_BRIDGE:
2889 /*
2890 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2891 * applicable... must never implement an ACS Extended Capability...".
2892 * This seems arbitrary, but we take a conservative interpretation
2893 * of this statement.
2894 */
2895 case PCI_EXP_TYPE_PCI_BRIDGE:
2896 case PCI_EXP_TYPE_RC_EC:
2897 return false;
2898 /*
2899 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2900 * implement ACS in order to indicate their peer-to-peer capabilities,
2901 * regardless of whether they are single- or multi-function devices.
2902 */
2903 case PCI_EXP_TYPE_DOWNSTREAM:
2904 case PCI_EXP_TYPE_ROOT_PORT:
2905 return pci_acs_flags_enabled(pdev, acs_flags);
2906 /*
2907 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2908 * implemented by the remaining PCIe types to indicate peer-to-peer
2909 * capabilities, but only when they are part of a multifunction
2910 * device. The footnote for section 6.12 indicates the specific
2911 * PCIe types included here.
2912 */
2913 case PCI_EXP_TYPE_ENDPOINT:
2914 case PCI_EXP_TYPE_UPSTREAM:
2915 case PCI_EXP_TYPE_LEG_END:
2916 case PCI_EXP_TYPE_RC_END:
2917 if (!pdev->multifunction)
2918 break;
2919
2920 return pci_acs_flags_enabled(pdev, acs_flags);
2921 }
2922
2923 /*
2924 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2925 * to single function devices with the exception of downstream ports.
2926 */
2927 return true;
2928 }
2929
2930 /**
2931 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
2932 * @start: starting downstream device
2933 * @end: ending upstream device or NULL to search to the root bus
2934 * @acs_flags: required flags
2935 *
2936 * Walk up a device tree from start to end testing PCI ACS support. If
2937 * any step along the way does not support the required flags, return false.
2938 */
2939 bool pci_acs_path_enabled(struct pci_dev *start,
2940 struct pci_dev *end, u16 acs_flags)
2941 {
2942 struct pci_dev *pdev, *parent = start;
2943
2944 do {
2945 pdev = parent;
2946
2947 if (!pci_acs_enabled(pdev, acs_flags))
2948 return false;
2949
2950 if (pci_is_root_bus(pdev->bus))
2951 return (end == NULL);
2952
2953 parent = pdev->bus->self;
2954 } while (pdev != end);
2955
2956 return true;
2957 }
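
/*
 * Usage sketch (illustrative only): device-assignment code can verify
 * that peer-to-peer DMA anywhere between @pdev and the root bus is
 * redirected upstream to the IOMMU:
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, PCI_ACS_SV | PCI_ACS_RR |
 *				  PCI_ACS_CR | PCI_ACS_UF))
 *		dev_info(&pdev->dev, "ACS not enabled on the whole path\n");
 */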
2958
2959 /**
2960 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2961 * @dev: the PCI device
2962 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2963 *
2964 * Perform INTx swizzling for a device behind one level of bridge. This is
2965 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2966 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2967 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2968 * the PCI Express Base Specification, Revision 2.1)
2969 */
2970 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2971 {
2972 int slot;
2973
2974 if (pci_ari_enabled(dev->bus))
2975 slot = 0;
2976 else
2977 slot = PCI_SLOT(dev->devfn);
2978
2979 return (((pin - 1) + slot) % 4) + 1;
2980 }
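
/*
 * Worked example: a device in slot 2 asserting INTB (pin 2) behind one
 * bridge swizzles to ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD at the bridge.
 */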
2981
2982 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2983 {
2984 u8 pin;
2985
2986 pin = dev->pin;
2987 if (!pin)
2988 return -1;
2989
2990 while (!pci_is_root_bus(dev->bus)) {
2991 pin = pci_swizzle_interrupt_pin(dev, pin);
2992 dev = dev->bus->self;
2993 }
2994 *bridge = dev;
2995 return pin;
2996 }
2997
2998 /**
2999 * pci_common_swizzle - swizzle INTx all the way to root bridge
3000 * @dev: the PCI device
3001 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3002 *
3003 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3004 * bridges all the way up to a PCI root bus.
3005 */
3006 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3007 {
3008 u8 pin = *pinp;
3009
3010 while (!pci_is_root_bus(dev->bus)) {
3011 pin = pci_swizzle_interrupt_pin(dev, pin);
3012 dev = dev->bus->self;
3013 }
3014 *pinp = pin;
3015 return PCI_SLOT(dev->devfn);
3016 }
3017 EXPORT_SYMBOL_GPL(pci_common_swizzle);
3018
3019 /**
3020 * pci_release_region - Release a PCI BAR
3021 * @pdev: PCI device whose resources were previously reserved by pci_request_region
3022 * @bar: BAR to release
3023 *
3024 * Releases the PCI I/O and memory resources previously reserved by a
3025 * successful call to pci_request_region. Call this function only
3026 * after all use of the PCI regions has ceased.
3027 */
3028 void pci_release_region(struct pci_dev *pdev, int bar)
3029 {
3030 struct pci_devres *dr;
3031
3032 if (pci_resource_len(pdev, bar) == 0)
3033 return;
3034 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3035 release_region(pci_resource_start(pdev, bar),
3036 pci_resource_len(pdev, bar));
3037 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3038 release_mem_region(pci_resource_start(pdev, bar),
3039 pci_resource_len(pdev, bar));
3040
3041 dr = find_pci_dr(pdev);
3042 if (dr)
3043 dr->region_mask &= ~(1 << bar);
3044 }
3045 EXPORT_SYMBOL(pci_release_region);
3046
3047 /**
3048 * __pci_request_region - Reserve PCI I/O and memory resource
3049 * @pdev: PCI device whose resources are to be reserved
3050 * @bar: BAR to be reserved
3051 * @res_name: Name to be associated with resource.
3052 * @exclusive: whether the region access is exclusive or not
3053 *
3054 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3055 * being reserved by owner @res_name. Do not access any
3056 * address inside the PCI regions unless this call returns
3057 * successfully.
3058 *
3059 * If @exclusive is set, then the region is marked so that userspace
3060 * is explicitly not allowed to map the resource via /dev/mem or
3061 * sysfs MMIO access.
3062 *
3063 * Returns 0 on success, or %EBUSY on error. A warning
3064 * message is also printed on failure.
3065 */
3066 static int __pci_request_region(struct pci_dev *pdev, int bar,
3067 const char *res_name, int exclusive)
3068 {
3069 struct pci_devres *dr;
3070
3071 if (pci_resource_len(pdev, bar) == 0)
3072 return 0;
3073
3074 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3075 if (!request_region(pci_resource_start(pdev, bar),
3076 pci_resource_len(pdev, bar), res_name))
3077 goto err_out;
3078 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3079 if (!__request_mem_region(pci_resource_start(pdev, bar),
3080 pci_resource_len(pdev, bar), res_name,
3081 exclusive))
3082 goto err_out;
3083 }
3084
3085 dr = find_pci_dr(pdev);
3086 if (dr)
3087 dr->region_mask |= 1 << bar;
3088
3089 return 0;
3090
3091 err_out:
3092 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
3093 &pdev->resource[bar]);
3094 return -EBUSY;
3095 }
3096
3097 /**
3098 * pci_request_region - Reserve PCI I/O and memory resource
3099 * @pdev: PCI device whose resources are to be reserved
3100 * @bar: BAR to be reserved
3101 * @res_name: Name to be associated with resource
3102 *
3103 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3104 * being reserved by owner @res_name. Do not access any
3105 * address inside the PCI regions unless this call returns
3106 * successfully.
3107 *
3108 * Returns 0 on success, or %EBUSY on error. A warning
3109 * message is also printed on failure.
3110 */
3111 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3112 {
3113 return __pci_request_region(pdev, bar, res_name, 0);
3114 }
3115 EXPORT_SYMBOL(pci_request_region);
3116
3117 /**
3118 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
3119 * @pdev: PCI device whose resources are to be reserved
3120 * @bar: BAR to be reserved
3121 * @res_name: Name to be associated with resource.
3122 *
3123 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3124 * being reserved by owner @res_name. Do not access any
3125 * address inside the PCI regions unless this call returns
3126 * successfully.
3127 *
3128 * Returns 0 on success, or %EBUSY on error. A warning
3129 * message is also printed on failure.
3130 *
3131 * The key difference that _exclusive makes is that userspace is
3132 * explicitly not allowed to map the resource via /dev/mem or
3133 * sysfs.
3134 */
3135 int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3136 const char *res_name)
3137 {
3138 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3139 }
3140 EXPORT_SYMBOL(pci_request_region_exclusive);
3141
3142 /**
3143 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3144 * @pdev: PCI device whose resources were previously reserved
3145 * @bars: Bitmask of BARs to be released
3146 *
3147 * Release selected PCI I/O and memory resources previously reserved.
3148 * Call this function only after all use of the PCI regions has ceased.
3149 */
3150 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3151 {
3152 int i;
3153
3154 for (i = 0; i < 6; i++)
3155 if (bars & (1 << i))
3156 pci_release_region(pdev, i);
3157 }
3158 EXPORT_SYMBOL(pci_release_selected_regions);
3159
3160 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3161 const char *res_name, int excl)
3162 {
3163 int i;
3164
3165 for (i = 0; i < 6; i++)
3166 if (bars & (1 << i))
3167 if (__pci_request_region(pdev, i, res_name, excl))
3168 goto err_out;
3169 return 0;
3170
3171 err_out:
3172 while (--i >= 0)
3173 if (bars & (1 << i))
3174 pci_release_region(pdev, i);
3175
3176 return -EBUSY;
3177 }
3178
3179
3180 /**
3181 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3182 * @pdev: PCI device whose resources are to be reserved
3183 * @bars: Bitmask of BARs to be requested
3184 * @res_name: Name to be associated with resource
3185 */
3186 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3187 const char *res_name)
3188 {
3189 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3190 }
3191 EXPORT_SYMBOL(pci_request_selected_regions);
3192
3193 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3194 const char *res_name)
3195 {
3196 return __pci_request_selected_regions(pdev, bars, res_name,
3197 IORESOURCE_EXCLUSIVE);
3198 }
3199 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3200
3201 /**
3202 * pci_release_regions - Release reserved PCI I/O and memory resources
3203 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
3204 *
3205 * Releases all PCI I/O and memory resources previously reserved by a
3206 * successful call to pci_request_regions. Call this function only
3207 * after all use of the PCI regions has ceased.
3208 */
3209
3210 void pci_release_regions(struct pci_dev *pdev)
3211 {
3212 pci_release_selected_regions(pdev, (1 << 6) - 1);
3213 }
3214 EXPORT_SYMBOL(pci_release_regions);
3215
3216 /**
3217 * pci_request_regions - Reserve PCI I/O and memory resources
3218 * @pdev: PCI device whose resources are to be reserved
3219 * @res_name: Name to be associated with resource.
3220 *
3221 * Mark all PCI regions associated with PCI device @pdev as
3222 * being reserved by owner @res_name. Do not access any
3223 * address inside the PCI regions unless this call returns
3224 * successfully.
3225 *
3226 * Returns 0 on success, or %EBUSY on error. A warning
3227 * message is also printed on failure.
3228 */
3229 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3230 {
3231 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3232 }
3233 EXPORT_SYMBOL(pci_request_regions);
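
/*
 * Usage sketch (illustrative only): the typical probe-time pairing of
 * pci_enable_device(), pci_request_regions() and BAR mapping; "foo" and
 * the error labels are hypothetical.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable;
 *
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs) {
 *		err = -ENOMEM;
 *		goto err_release;
 *	}
 *	return 0;
 *
 * err_release:
 *	pci_release_regions(pdev);
 * err_disable:
 *	pci_disable_device(pdev);
 *	return err;
 */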
3234
3235 /**
3236 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3237 * @pdev: PCI device whose resources are to be reserved
3238 * @res_name: Name to be associated with resource.
3239 *
3240 * Mark all PCI regions associated with PCI device @pdev as
3241 * being reserved by owner @res_name. Do not access any
3242 * address inside the PCI regions unless this call returns
3243 * successfully.
3244 *
3245 * pci_request_regions_exclusive() will mark the region so that
3246 * /dev/mem and the sysfs MMIO access will not be allowed.
3247 *
3248 * Returns 0 on success, or %EBUSY on error. A warning
3249 * message is also printed on failure.
3250 */
3251 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3252 {
3253 return pci_request_selected_regions_exclusive(pdev,
3254 ((1 << 6) - 1), res_name);
3255 }
3256 EXPORT_SYMBOL(pci_request_regions_exclusive);
3257
3258 #if defined(PCI_IOBASE) && !defined(CONFIG_LIBIO)
3259 struct io_range {
3260 struct list_head list;
3261 phys_addr_t start;
3262 resource_size_t size;
3263 };
3264
3265 static LIST_HEAD(io_range_list);
3266 static DEFINE_SPINLOCK(io_range_lock);
3267 #endif
3268
3269 /*
3270 * Record the PCI IO range (expressed as CPU physical address + size).
3271 * Return a negative value if an error has occurred, zero otherwise
3272 */
3273 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3274 resource_size_t size)
3275 {
3276 int err = 0;
3277
3278 #ifdef PCI_IOBASE
3279 #ifdef CONFIG_LIBIO
3280 struct libio_range *range, *tmprange;
3281
3282 if (!size || addr + size < addr)
3283 return -EINVAL;
3284
3285 WARN_ON(!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(size));
3286
3287 range = kzalloc(sizeof(*range), GFP_KERNEL);
3288 if (!range)
3289 return -ENOMEM;
3290 range->node = fwnode;
3291 range->flags = IO_CPU_MMIO;
3292 range->size = size;
3293 range->hw_start = addr;
3294
3295 tmprange = register_libio_range(range);
3296 if (tmprange != range) {
3297 kfree(range);
3298 if (IS_ERR(tmprange))
3299 return -EFAULT;
3300 }
3301 #else
3302 struct io_range *range;
3303 resource_size_t allocated_size = 0;
3304
3305 /* check if the range hasn't been previously recorded */
3306 spin_lock(&io_range_lock);
3307 list_for_each_entry(range, &io_range_list, list) {
3308 if (addr >= range->start && addr + size <= range->start + range->size) {
3309 /* range already registered, bail out */
3310 goto end_register;
3311 }
3312 allocated_size += range->size;
3313 }
3314
3315 /* range not registered yet, check for available space */
3316 if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
3317 /* if it's too big, check whether 64K of space can be reserved */
3318 if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
3319 err = -E2BIG;
3320 goto end_register;
3321 }
3322
3323 size = SZ_64K;
3324 pr_warn("Requested IO range too big, new size set to 64K\n");
3325 }
3326
3327 /* add the range to the list */
3328 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3329 if (!range) {
3330 err = -ENOMEM;
3331 goto end_register;
3332 }
3333
3334 range->start = addr;
3335 range->size = size;
3336
3337 list_add_tail(&range->list, &io_range_list);
3338
3339 end_register:
3340 spin_unlock(&io_range_lock);
3341 #endif /* CONFIG_LIBIO */
3342 #endif /* PCI_IOBASE */
3343
3344 return err;
3345 }
3346
3347 phys_addr_t pci_pio_to_address(unsigned long pio)
3348 {
3349 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3350
3351 #ifdef PCI_IOBASE
3352 #ifdef CONFIG_LIBIO
3353 if (pio > IO_SPACE_LIMIT)
3354 return address;
3355
3356 address = libio_to_hwaddr(pio);
3357 #else
3358 struct io_range *range;
3359 resource_size_t allocated_size = 0;
3360
3361 if (pio > IO_SPACE_LIMIT)
3362 return address;
3363
3364 spin_lock(&io_range_lock);
3365 list_for_each_entry(range, &io_range_list, list) {
3366 if (pio >= allocated_size && pio < allocated_size + range->size) {
3367 address = range->start + pio - allocated_size;
3368 break;
3369 }
3370 allocated_size += range->size;
3371 }
3372 spin_unlock(&io_range_lock);
3373 #endif /* CONFIG_LIBIO */
3374 #endif /* PCI_IOBASE */
3375
3376 return address;
3377 }
3378
3379 unsigned long __weak pci_address_to_pio(phys_addr_t address)
3380 {
3381 #ifdef PCI_IOBASE
3382 #ifdef CONFIG_LIBIO
3383 return libio_translate_cpuaddr(address);
3384 #else
3385 struct io_range *res;
3386 resource_size_t offset = 0;
3387 unsigned long addr = -1;
3388
3389 spin_lock(&io_range_lock);
3390 list_for_each_entry(res, &io_range_list, list) {
3391 if (address >= res->start && address < res->start + res->size) {
3392 addr = address - res->start + offset;
3393 break;
3394 }
3395 offset += res->size;
3396 }
3397 spin_unlock(&io_range_lock);
3398
3399 return addr;
3400 #endif
3401 #else
3402 #ifndef CONFIG_LIBIO
3403 if (address > IO_SPACE_LIMIT)
3404 return (unsigned long)-1;
3405 #endif
3406 return (unsigned long) address;
3407 #endif
3408 }
3409
3410 /**
3411 * pci_remap_iospace - Remap the memory mapped I/O space
3412 * @res: Resource describing the I/O space
3413 * @phys_addr: physical address of range to be mapped
3414 *
3415 * Remap the memory mapped I/O space described by the @res
3416 * and the CPU physical address @phys_addr into virtual address space.
3417 * Only architectures that have memory mapped IO functions defined
3418 * (and the PCI_IOBASE value defined) should call this function.
3419 */
3420 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3421 {
3422 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3423 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3424
3425 if (!(res->flags & IORESOURCE_IO))
3426 return -EINVAL;
3427
3428 if (res->end > IO_SPACE_LIMIT)
3429 return -EINVAL;
3430
3431 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3432 pgprot_device(PAGE_KERNEL));
3433 #else
3434 /* This architecture does not have memory mapped I/O space, so
3435 this function should never be called. */
3436 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3437 return -ENODEV;
3438 #endif
3439 }
3440 EXPORT_SYMBOL(pci_remap_iospace);
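
/*
 * Usage sketch (illustrative only, assuming a host bridge driver): an
 * I/O window resource "res" backed by CPU physical address "phys" is
 * mapped with:
 *
 *	err = pci_remap_iospace(&res, phys);
 *	if (err)
 *		dev_warn(dev, "error %d: failed to map I/O range %pR\n",
 *			 err, &res);
 */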
3441
3442 /**
3443 * pci_unmap_iospace - Unmap the memory mapped I/O space
3444 * @res: resource to be unmapped
3445 *
3446 * Unmap the CPU virtual address @res from virtual address space.
3447 * Only architectures that have memory mapped IO functions defined
3448 * (and the PCI_IOBASE value defined) should call this function.
3449 */
3450 void pci_unmap_iospace(struct resource *res)
3451 {
3452 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3453 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3454
3455 unmap_kernel_range(vaddr, resource_size(res));
3456 #endif
3457 }
3458 EXPORT_SYMBOL(pci_unmap_iospace);
3459
3460 /**
3461 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
3462 * @dev: Generic device to remap IO address for
3463 * @offset: Resource address to map
3464 * @size: Size of map
3465 *
3466 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
3467 * detach.
3468 */
3469 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3470 resource_size_t offset,
3471 resource_size_t size)
3472 {
3473 void __iomem **ptr, *addr;
3474
3475 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3476 if (!ptr)
3477 return NULL;
3478
3479 addr = pci_remap_cfgspace(offset, size);
3480 if (addr) {
3481 *ptr = addr;
3482 devres_add(dev, ptr);
3483 } else
3484 devres_free(ptr);
3485
3486 return addr;
3487 }
3488 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3489
3490 /**
3491 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
3492 * @dev: generic device to handle the resource for
3493 * @res: configuration space resource to be handled
3494 *
3495 * Checks that a resource is a valid memory region, requests the memory
3496 * region and ioremaps with pci_remap_cfgspace() API that ensures the
3497 * proper PCI configuration space memory attributes are guaranteed.
3498 *
3499 * All operations are managed and will be undone on driver detach.
3500 *
3501 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
3502 * on failure. Usage example:
3503 *
3504 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3505 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
3506 * if (IS_ERR(base))
3507 * return PTR_ERR(base);
3508 */
3509 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3510 struct resource *res)
3511 {
3512 resource_size_t size;
3513 const char *name;
3514 void __iomem *dest_ptr;
3515
3516 BUG_ON(!dev);
3517
3518 if (!res || resource_type(res) != IORESOURCE_MEM) {
3519 dev_err(dev, "invalid resource\n");
3520 return IOMEM_ERR_PTR(-EINVAL);
3521 }
3522
3523 size = resource_size(res);
3524 name = res->name ?: dev_name(dev);
3525
3526 if (!devm_request_mem_region(dev, res->start, size, name)) {
3527 dev_err(dev, "can't request region for resource %pR\n", res);
3528 return IOMEM_ERR_PTR(-EBUSY);
3529 }
3530
3531 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3532 if (!dest_ptr) {
3533 dev_err(dev, "ioremap failed for resource %pR\n", res);
3534 devm_release_mem_region(dev, res->start, size);
3535 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3536 }
3537
3538 return dest_ptr;
3539 }
3540 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3541
3542 static void __pci_set_master(struct pci_dev *dev, bool enable)
3543 {
3544 u16 old_cmd, cmd;
3545
3546 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3547 if (enable)
3548 cmd = old_cmd | PCI_COMMAND_MASTER;
3549 else
3550 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3551 if (cmd != old_cmd) {
3552 dev_dbg(&dev->dev, "%s bus mastering\n",
3553 enable ? "enabling" : "disabling");
3554 pci_write_config_word(dev, PCI_COMMAND, cmd);
3555 }
3556 dev->is_busmaster = enable;
3557 }
3558
3559 /**
3560 * pcibios_setup - process "pci=" kernel boot arguments
3561 * @str: string used to pass in "pci=" kernel boot arguments
3562 *
3563 * Process kernel boot arguments. This is the default implementation.
3564 * Architecture specific implementations can override this as necessary.
3565 */
3566 char * __weak __init pcibios_setup(char *str)
3567 {
3568 return str;
3569 }
3570
3571 /**
3572 * pcibios_set_master - enable PCI bus-mastering for device dev
3573 * @dev: the PCI device to enable
3574 *
3575 * Enables PCI bus-mastering for the device. This is the default
3576 * implementation. Architecture specific implementations can override
3577 * this if necessary.
3578 */
3579 void __weak pcibios_set_master(struct pci_dev *dev)
3580 {
3581 u8 lat;
3582
3583 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3584 if (pci_is_pcie(dev))
3585 return;
3586
3587 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3588 if (lat < 16)
3589 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3590 else if (lat > pcibios_max_latency)
3591 lat = pcibios_max_latency;
3592 else
3593 return;
3594
3595 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3596 }
3597
3598 /**
3599 * pci_set_master - enables bus-mastering for device dev
3600 * @dev: the PCI device to enable
3601 *
3602 * Enables bus-mastering on the device and calls pcibios_set_master()
3603 * to do the needed arch specific settings.
3604 */
3605 void pci_set_master(struct pci_dev *dev)
3606 {
3607 __pci_set_master(dev, true);
3608 pcibios_set_master(dev);
3609 }
3610 EXPORT_SYMBOL(pci_set_master);
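
/*
 * Usage sketch (illustrative only): bus mastering must be on before the
 * device can perform DMA; probe code commonly pairs it with setting the
 * DMA mask.
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */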
3611
3612 /**
3613 * pci_clear_master - disables bus-mastering for device dev
3614 * @dev: the PCI device to disable
3615 */
3616 void pci_clear_master(struct pci_dev *dev)
3617 {
3618 __pci_set_master(dev, false);
3619 }
3620 EXPORT_SYMBOL(pci_clear_master);
3621
3622 /**
3623 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3624 * @dev: the PCI device for which MWI is to be enabled
3625 *
3626 * Helper function for pci_set_mwi.
3627 * Originally copied from drivers/net/acenic.c.
3628 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3629 *
3630 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3631 */
3632 int pci_set_cacheline_size(struct pci_dev *dev)
3633 {
3634 u8 cacheline_size;
3635
3636 if (!pci_cache_line_size)
3637 return -EINVAL;
3638
3639 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3640 equal to or a multiple of the right value. */
3641 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3642 if (cacheline_size >= pci_cache_line_size &&
3643 (cacheline_size % pci_cache_line_size) == 0)
3644 return 0;
3645
3646 /* Write the correct value. */
3647 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3648 /* Read it back. */
3649 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3650 if (cacheline_size == pci_cache_line_size)
3651 return 0;
3652
3653 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3654 pci_cache_line_size << 2);
3655
3656 return -EINVAL;
3657 }
3658 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3659
3660 /**
3661 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3662 * @dev: the PCI device for which MWI is enabled
3663 *
3664 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3665 *
3666 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3667 */
3668 int pci_set_mwi(struct pci_dev *dev)
3669 {
3670 #ifdef PCI_DISABLE_MWI
3671 return 0;
3672 #else
3673 int rc;
3674 u16 cmd;
3675
3676 rc = pci_set_cacheline_size(dev);
3677 if (rc)
3678 return rc;
3679
3680 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3681 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3682 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3683 cmd |= PCI_COMMAND_INVALIDATE;
3684 pci_write_config_word(dev, PCI_COMMAND, cmd);
3685 }
3686 return 0;
3687 #endif
3688 }
3689 EXPORT_SYMBOL(pci_set_mwi);
3690
3691 /**
3692 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3693 * @dev: the PCI device for which MWI is enabled
3694 *
3695 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3696 * Callers are not required to check the return value.
3697 *
3698 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3699 */
3700 int pci_try_set_mwi(struct pci_dev *dev)
3701 {
3702 #ifdef PCI_DISABLE_MWI
3703 return 0;
3704 #else
3705 return pci_set_mwi(dev);
3706 #endif
3707 }
3708 EXPORT_SYMBOL(pci_try_set_mwi);
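
/*
 * Usage sketch (illustrative only): since MWI is only an optimization,
 * callers typically fire and forget:
 *
 *	pci_try_set_mwi(pdev);
 */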
3709
3710 /**
3711 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3712 * @dev: the PCI device to disable
3713 *
3714 * Disables PCI Memory-Write-Invalidate transaction on the device
3715 */
3716 void pci_clear_mwi(struct pci_dev *dev)
3717 {
3718 #ifndef PCI_DISABLE_MWI
3719 u16 cmd;
3720
3721 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3722 if (cmd & PCI_COMMAND_INVALIDATE) {
3723 cmd &= ~PCI_COMMAND_INVALIDATE;
3724 pci_write_config_word(dev, PCI_COMMAND, cmd);
3725 }
3726 #endif
3727 }
3728 EXPORT_SYMBOL(pci_clear_mwi);
3729
3730 /**
3731 * pci_intx - enables/disables PCI INTx for device dev
3732 * @pdev: the PCI device to operate on
3733 * @enable: boolean: whether to enable or disable PCI INTx
3734 *
3735 * Enables/disables PCI INTx for device dev
3736 */
3737 void pci_intx(struct pci_dev *pdev, int enable)
3738 {
3739 u16 pci_command, new;
3740
3741 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3742
3743 if (enable)
3744 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3745 else
3746 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3747
3748 if (new != pci_command) {
3749 struct pci_devres *dr;
3750
3751 pci_write_config_word(pdev, PCI_COMMAND, new);
3752
3753 dr = find_pci_dr(pdev);
3754 if (dr && !dr->restore_intx) {
3755 dr->restore_intx = 1;
3756 dr->orig_intx = !enable;
3757 }
3758 }
3759 }
3760 EXPORT_SYMBOL_GPL(pci_intx);
3761
3762 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3763 {
3764 struct pci_bus *bus = dev->bus;
3765 bool mask_updated = true;
3766 u32 cmd_status_dword;
3767 u16 origcmd, newcmd;
3768 unsigned long flags;
3769 bool irq_pending;
3770
3771 /*
3772 * We do a single dword read to retrieve both command and status.
3773 * Document assumptions that make this possible.
3774 */
3775 BUILD_BUG_ON(PCI_COMMAND % 4);
3776 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3777
3778 raw_spin_lock_irqsave(&pci_lock, flags);
3779
3780 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3781
3782 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3783
3784 /*
3785 * Check interrupt status register to see whether our device
3786 * triggered the interrupt (when masking) or the next IRQ is
3787 * already pending (when unmasking).
3788 */
3789 if (mask != irq_pending) {
3790 mask_updated = false;
3791 goto done;
3792 }
3793
3794 origcmd = cmd_status_dword;
3795 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3796 if (mask)
3797 newcmd |= PCI_COMMAND_INTX_DISABLE;
3798 if (newcmd != origcmd)
3799 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3800
3801 done:
3802 raw_spin_unlock_irqrestore(&pci_lock, flags);
3803
3804 return mask_updated;
3805 }
3806
3807 /**
3808 * pci_check_and_mask_intx - mask INTx on pending interrupt
3809 * @dev: the PCI device to operate on
3810 *
3811 * Check if the device dev has its INTx line asserted, mask it and
3812 * return true in that case. False is returned if no interrupt was
3813 * pending.
3814 */
3815 bool pci_check_and_mask_intx(struct pci_dev *dev)
3816 {
3817 return pci_check_and_set_intx_mask(dev, true);
3818 }
3819 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3820
3821 /**
3822 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3823 * @dev: the PCI device to operate on
3824 *
3825 * Check if the device dev has its INTx line asserted, unmask it if not
3826 * and return true. False is returned and the mask remains active if
3827 * there was still an interrupt pending.
3828 */
3829 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3830 {
3831 return pci_check_and_set_intx_mask(dev, false);
3832 }
3833 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3834
3835 /**
3836 * pci_wait_for_pending_transaction - waits for pending transaction
3837 * @dev: the PCI device to operate on
3838 *
3839 * Return 0 if transaction is pending 1 otherwise.
3840 */
3841 int pci_wait_for_pending_transaction(struct pci_dev *dev)
3842 {
3843 if (!pci_is_pcie(dev))
3844 return 1;
3845
3846 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3847 PCI_EXP_DEVSTA_TRPND);
3848 }
3849 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3850
3851 /*
3852 * We should only need to wait 100ms after FLR, but some devices take longer.
3853 * Wait for up to 1000ms for config space to return something other than -1.
3854 * Intel IGD requires this when an LCD panel is attached. We read the 2nd
3855 * dword because VFs don't implement the 1st dword.
3856 */
3857 static void pci_flr_wait(struct pci_dev *dev)
3858 {
3859 int i = 0;
3860 u32 id;
3861
3862 do {
3863 msleep(100);
3864 pci_read_config_dword(dev, PCI_COMMAND, &id);
3865 } while (i++ < 10 && id == ~0);
3866
3867 if (id == ~0)
3868 dev_warn(&dev->dev, "Failed to return from FLR\n");
3869 else if (i > 1)
3870 dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
3871 (i - 1) * 100);
3872 }
3873
3874 /**
3875 * pcie_has_flr - check if a device supports function level resets
3876 * @dev: device to check
3877 *
3878 * Returns true if the device advertises support for PCIe function level
3879 * resets.
3880 */
3881 static bool pcie_has_flr(struct pci_dev *dev)
3882 {
3883 u32 cap;
3884
3885 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3886 return false;
3887
3888 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3889 return cap & PCI_EXP_DEVCAP_FLR;
3890 }
3891
3892 /**
3893 * pcie_flr - initiate a PCIe function level reset
3894 * @dev: device to reset
3895 *
3896 * Initiate a function level reset on @dev. The caller should ensure the
3897 * device supports FLR before calling this function, e.g. by using the
3898 * pcie_has_flr() helper.
3899 */
3900 void pcie_flr(struct pci_dev *dev)
3901 {
3902 if (!pci_wait_for_pending_transaction(dev))
3903 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3904
3905 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3906 pci_flr_wait(dev);
3907 }
3908 EXPORT_SYMBOL_GPL(pcie_flr);
3909
3910 static int pci_af_flr(struct pci_dev *dev, int probe)
3911 {
3912 int pos;
3913 u8 cap;
3914
3915 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3916 if (!pos)
3917 return -ENOTTY;
3918
3919 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3920 return -ENOTTY;
3921
3922 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3923 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3924 return -ENOTTY;
3925
3926 if (probe)
3927 return 0;
3928
3929 /*
3930 * Wait for Transaction Pending bit to clear. A word-aligned test
3931 * is used, so we use the conrol offset rather than status and shift
3932 * the test bit to match.
3933 */
3934 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3935 PCI_AF_STATUS_TP << 8))
3936 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3937
3938 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3939 pci_flr_wait(dev);
3940 return 0;
3941 }
3942
3943 /**
3944 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3945 * @dev: Device to reset.
3946 * @probe: If set, only check if the device can be reset this way.
3947 *
3948 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3949 * unset, it will be reinitialized internally when going from PCI_D3hot to
3950 * PCI_D0. If that's the case and the device is not in a low-power state
3951 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3952 *
3953 * NOTE: This causes the caller to sleep for twice the device power transition
3954 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3955 * by default (i.e. unless the @dev's d3_delay field has a different value).
3956 * Moreover, only devices in D0 can be reset by this function.
3957 */
3958 static int pci_pm_reset(struct pci_dev *dev, int probe)
3959 {
3960 u16 csr;
3961
3962 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3963 return -ENOTTY;
3964
3965 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3966 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3967 return -ENOTTY;
3968
3969 if (probe)
3970 return 0;
3971
3972 if (dev->current_state != PCI_D0)
3973 return -EINVAL;
3974
3975 csr &= ~PCI_PM_CTRL_STATE_MASK;
3976 csr |= PCI_D3hot;
3977 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3978 pci_dev_d3_sleep(dev);
3979
3980 csr &= ~PCI_PM_CTRL_STATE_MASK;
3981 csr |= PCI_D0;
3982 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3983 pci_dev_d3_sleep(dev);
3984
3985 return 0;
3986 }
3987
3988 void pci_reset_secondary_bus(struct pci_dev *dev)
3989 {
3990 u16 ctrl;
3991
3992 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3993 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3994 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3995 /*
3996 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
3997 * this to 2ms to ensure that we meet the minimum requirement.
3998 */
3999 msleep(2);
4000
4001 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4002 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4003
4004 /*
4005 * Trhfa for conventional PCI is 2^25 clock cycles.
4006 * Assuming a minimum 33MHz clock this results in a 1s
4007 * delay before we can consider subordinate devices to
4008 * be re-initialized. PCIe has some ways to shorten this,
4009 * but we don't make use of them yet.
4010 */
4011 ssleep(1);
4012 }
4013
4014 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4015 {
4016 pci_reset_secondary_bus(dev);
4017 }
4018
4019 /**
4020 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
4021 * @dev: Bridge device
4022 *
4023 * Use the bridge control register to assert reset on the secondary bus.
4024 * Devices on the secondary bus are left in power-on state.
4025 */
4026 void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4027 {
4028 pcibios_reset_secondary_bus(dev);
4029 }
4030 EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4031
4032 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4033 {
4034 struct pci_dev *pdev;
4035
4036 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4037 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4038 return -ENOTTY;
4039
4040 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4041 if (pdev != dev)
4042 return -ENOTTY;
4043
4044 if (probe)
4045 return 0;
4046
4047 pci_reset_bridge_secondary_bus(dev->bus->self);
4048
4049 return 0;
4050 }
4051
4052 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4053 {
4054 int rc = -ENOTTY;
4055
4056 if (!hotplug || !try_module_get(hotplug->ops->owner))
4057 return rc;
4058
4059 if (hotplug->ops->reset_slot)
4060 rc = hotplug->ops->reset_slot(hotplug, probe);
4061
4062 module_put(hotplug->ops->owner);
4063
4064 return rc;
4065 }
4066
4067 static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4068 {
4069 struct pci_dev *pdev;
4070
4071 if (dev->subordinate || !dev->slot ||
4072 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4073 return -ENOTTY;
4074
4075 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4076 if (pdev != dev && pdev->slot == dev->slot)
4077 return -ENOTTY;
4078
4079 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4080 }
4081
4082 static void pci_dev_lock(struct pci_dev *dev)
4083 {
4084 pci_cfg_access_lock(dev);
4085 /* block PM suspend, driver probe, etc. */
4086 device_lock(&dev->dev);
4087 }
4088
4089 /* Return 1 on successful lock, 0 on contention */
4090 static int pci_dev_trylock(struct pci_dev *dev)
4091 {
4092 if (pci_cfg_access_trylock(dev)) {
4093 if (device_trylock(&dev->dev))
4094 return 1;
4095 pci_cfg_access_unlock(dev);
4096 }
4097
4098 return 0;
4099 }
4100
4101 static void pci_dev_unlock(struct pci_dev *dev)
4102 {
4103 device_unlock(&dev->dev);
4104 pci_cfg_access_unlock(dev);
4105 }
4106
4107 static void pci_dev_save_and_disable(struct pci_dev *dev)
4108 {
4109 const struct pci_error_handlers *err_handler =
4110 dev->driver ? dev->driver->err_handler : NULL;
4111
4112 /*
4113 * dev->driver->err_handler->reset_prepare() is protected against
4114 * races with ->remove() by the device lock, which must be held by
4115 * the caller.
4116 */
4117 if (err_handler && err_handler->reset_prepare)
4118 err_handler->reset_prepare(dev);
4119
4120 /*
4121 * Wake-up device prior to save. PM registers default to D0 after
4122 * reset and a simple register restore doesn't reliably return
4123 * to a non-D0 state anyway.
4124 */
4125 pci_set_power_state(dev, PCI_D0);
4126
4127 pci_save_state(dev);
4128 /*
4129 * Disable the device by clearing the Command register, except for
4130 * INTx-disable which is set. This not only disables MMIO and I/O port
4131 * BARs, but also prevents the device from being Bus Master, preventing
4132 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4133 * compliant devices, INTx-disable prevents legacy interrupts.
4134 */
4135 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4136 }
4137
4138 static void pci_dev_restore(struct pci_dev *dev)
4139 {
4140 const struct pci_error_handlers *err_handler =
4141 dev->driver ? dev->driver->err_handler : NULL;
4142
4143 pci_restore_state(dev);
4144
4145 /*
4146 * dev->driver->err_handler->reset_done() is protected against
4147 * races with ->remove() by the device lock, which must be held by
4148 * the caller.
4149 */
4150 if (err_handler && err_handler->reset_done)
4151 err_handler->reset_done(dev);
4152 }
4153
4154 /**
4155 * __pci_reset_function - reset a PCI device function
4156 * @dev: PCI device to reset
4157 *
4158 * Some devices allow an individual function to be reset without affecting
4159 * other functions in the same device. The PCI device must be responsive
4160 * to PCI config space in order to use this function.
4161 *
4162 * The device function is presumed to be unused when this function is called.
4163 * Resetting the device will make the contents of PCI configuration space
4164 * random, so any caller of this must be prepared to reinitialise the
4165 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4166 * etc.
4167 *
4168 * Returns 0 if the device function was successfully reset or negative if the
4169 * device doesn't support resetting a single function.
4170 */
4171 int __pci_reset_function(struct pci_dev *dev)
4172 {
4173 int ret;
4174
4175 pci_dev_lock(dev);
4176 ret = __pci_reset_function_locked(dev);
4177 pci_dev_unlock(dev);
4178
4179 return ret;
4180 }
4181 EXPORT_SYMBOL_GPL(__pci_reset_function);
4182
4183 /**
4184 * __pci_reset_function_locked - reset a PCI device function while holding
4185 * the @dev mutex lock.
4186 * @dev: PCI device to reset
4187 *
4188 * Some devices allow an individual function to be reset without affecting
4189 * other functions in the same device. The PCI device must be responsive
4190 * to PCI config space in order to use this function.
4191 *
4192 * The device function is presumed to be unused and the caller is holding
4193 * the device mutex lock when this function is called.
4194 * Resetting the device will make the contents of PCI configuration space
4195 * random, so any caller of this must be prepared to reinitialise the
4196 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4197 * etc.
4198 *
4199 * Returns 0 if the device function was successfully reset or negative if the
4200 * device doesn't support resetting a single function.
4201 */
4202 int __pci_reset_function_locked(struct pci_dev *dev)
4203 {
4204 int rc;
4205
4206 might_sleep();
4207
4208 rc = pci_dev_specific_reset(dev, 0);
4209 if (rc != -ENOTTY)
4210 return rc;
4211 if (pcie_has_flr(dev)) {
4212 pcie_flr(dev);
4213 return 0;
4214 }
4215 rc = pci_af_flr(dev, 0);
4216 if (rc != -ENOTTY)
4217 return rc;
4218 rc = pci_pm_reset(dev, 0);
4219 if (rc != -ENOTTY)
4220 return rc;
4221 rc = pci_dev_reset_slot_function(dev, 0);
4222 if (rc != -ENOTTY)
4223 return rc;
4224 return pci_parent_bus_reset(dev, 0);
4225 }
4226 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4227
4228 /**
4229 * pci_probe_reset_function - check whether the device can be safely reset
4230 * @dev: PCI device to reset
4231 *
4232 * Some devices allow an individual function to be reset without affecting
4233 * other functions in the same device. The PCI device must be responsive
4234 * to PCI config space in order to use this function.
4235 *
4236 * Returns 0 if the device function can be reset or negative if the
4237 * device doesn't support resetting a single function.
4238 */
4239 int pci_probe_reset_function(struct pci_dev *dev)
4240 {
4241 int rc;
4242
4243 might_sleep();
4244
4245 rc = pci_dev_specific_reset(dev, 1);
4246 if (rc != -ENOTTY)
4247 return rc;
4248 if (pcie_has_flr(dev))
4249 return 0;
4250 rc = pci_af_flr(dev, 1);
4251 if (rc != -ENOTTY)
4252 return rc;
4253 rc = pci_pm_reset(dev, 1);
4254 if (rc != -ENOTTY)
4255 return rc;
4256 rc = pci_dev_reset_slot_function(dev, 1);
4257 if (rc != -ENOTTY)
4258 return rc;
4259
4260 return pci_parent_bus_reset(dev, 1);
4261 }
4262
4263 /**
4264 * pci_reset_function - quiesce and reset a PCI device function
4265 * @dev: PCI device to reset
4266 *
4267 * Some devices allow an individual function to be reset without affecting
4268 * other functions in the same device. The PCI device must be responsive
4269 * to PCI config space in order to use this function.
4270 *
4271 * This function does not just reset the PCI portion of a device, but
4272 * clears all the state associated with the device. This function differs
4273 * from __pci_reset_function in that it saves and restores device state
4274 * over the reset.
4275 *
4276 * Returns 0 if the device function was successfully reset or negative if the
4277 * device doesn't support resetting a single function.
4278 */
4279 int pci_reset_function(struct pci_dev *dev)
4280 {
4281 int rc;
4282
4283 rc = pci_probe_reset_function(dev);
4284 if (rc)
4285 return rc;
4286
4287 pci_dev_lock(dev);
4288 pci_dev_save_and_disable(dev);
4289
4290 rc = __pci_reset_function_locked(dev);
4291
4292 pci_dev_restore(dev);
4293 pci_dev_unlock(dev);
4294
4295 return rc;
4296 }
4297 EXPORT_SYMBOL_GPL(pci_reset_function);
4298
4299 /**
4300 * pci_reset_function_locked - quiesce and reset a PCI device function
4301 * @dev: PCI device to reset
4302 *
4303 * Some devices allow an individual function to be reset without affecting
4304 * other functions in the same device. The PCI device must be responsive
4305 * to PCI config space in order to use this function.
4306 *
4307 * This function does not just reset the PCI portion of a device, but
4308 * clears all the state associated with the device. This function differs
4309 * from __pci_reset_function() in that it saves and restores device state
4310 * over the reset. It also differs from pci_reset_function() in that it
4311 * requires the PCI device lock to be held.
4312 *
4313 * Returns 0 if the device function was successfully reset or negative if the
4314 * device doesn't support resetting a single function.
4315 */
4316 int pci_reset_function_locked(struct pci_dev *dev)
4317 {
4318 int rc;
4319
4320 rc = pci_probe_reset_function(dev);
4321 if (rc)
4322 return rc;
4323
4324 pci_dev_save_and_disable(dev);
4325
4326 rc = __pci_reset_function_locked(dev);
4327
4328 pci_dev_restore(dev);
4329
4330 return rc;
4331 }
4332 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4333
4334 /**
4335 * pci_try_reset_function - quiesce and reset a PCI device function
4336 * @dev: PCI device to reset
4337 *
4338 * Same as above, except return -EAGAIN if unable to lock device.
4339 */
4340 int pci_try_reset_function(struct pci_dev *dev)
4341 {
4342 int rc;
4343
4344 rc = pci_probe_reset_function(dev);
4345 if (rc)
4346 return rc;
4347
4348 if (!pci_dev_trylock(dev))
4349 return -EAGAIN;
4350
4351 pci_dev_save_and_disable(dev);
4352 rc = __pci_reset_function_locked(dev);
4353 pci_dev_unlock(dev);
4354
4355 pci_dev_restore(dev);
4356 return rc;
4357 }
4358 EXPORT_SYMBOL_GPL(pci_try_reset_function);
4359
4360 /* Do any devices on or below this bus prevent a bus reset? */
4361 static bool pci_bus_resetable(struct pci_bus *bus)
4362 {
4363 struct pci_dev *dev;
4364
4365 list_for_each_entry(dev, &bus->devices, bus_list) {
4366 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4367 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4368 return false;
4369 }
4370
4371 return true;
4372 }
4373
4374 /* Lock devices from the top of the tree down */
4375 static void pci_bus_lock(struct pci_bus *bus)
4376 {
4377 struct pci_dev *dev;
4378
4379 list_for_each_entry(dev, &bus->devices, bus_list) {
4380 pci_dev_lock(dev);
4381 if (dev->subordinate)
4382 pci_bus_lock(dev->subordinate);
4383 }
4384 }
4385
4386 /* Unlock devices from the bottom of the tree up */
4387 static void pci_bus_unlock(struct pci_bus *bus)
4388 {
4389 struct pci_dev *dev;
4390
4391 list_for_each_entry(dev, &bus->devices, bus_list) {
4392 if (dev->subordinate)
4393 pci_bus_unlock(dev->subordinate);
4394 pci_dev_unlock(dev);
4395 }
4396 }
4397
4398 /* Return 1 on successful lock, 0 on contention */
4399 static int pci_bus_trylock(struct pci_bus *bus)
4400 {
4401 struct pci_dev *dev;
4402
4403 list_for_each_entry(dev, &bus->devices, bus_list) {
4404 if (!pci_dev_trylock(dev))
4405 goto unlock;
4406 if (dev->subordinate) {
4407 if (!pci_bus_trylock(dev->subordinate)) {
4408 pci_dev_unlock(dev);
4409 goto unlock;
4410 }
4411 }
4412 }
4413 return 1;
4414
4415 unlock:
4416 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4417 if (dev->subordinate)
4418 pci_bus_unlock(dev->subordinate);
4419 pci_dev_unlock(dev);
4420 }
4421 return 0;
4422 }
4423
4424 /* Do any devices on or below this slot prevent a bus reset? */
4425 static bool pci_slot_resetable(struct pci_slot *slot)
4426 {
4427 struct pci_dev *dev;
4428
4429 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4430 if (!dev->slot || dev->slot != slot)
4431 continue;
4432 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4433 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4434 return false;
4435 }
4436
4437 return true;
4438 }
4439
4440 /* Lock devices from the top of the tree down */
4441 static void pci_slot_lock(struct pci_slot *slot)
4442 {
4443 struct pci_dev *dev;
4444
4445 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4446 if (!dev->slot || dev->slot != slot)
4447 continue;
4448 pci_dev_lock(dev);
4449 if (dev->subordinate)
4450 pci_bus_lock(dev->subordinate);
4451 }
4452 }
4453
4454 /* Unlock devices from the bottom of the tree up */
4455 static void pci_slot_unlock(struct pci_slot *slot)
4456 {
4457 struct pci_dev *dev;
4458
4459 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4460 if (!dev->slot || dev->slot != slot)
4461 continue;
4462 if (dev->subordinate)
4463 pci_bus_unlock(dev->subordinate);
4464 pci_dev_unlock(dev);
4465 }
4466 }
4467
4468 /* Return 1 on successful lock, 0 on contention */
4469 static int pci_slot_trylock(struct pci_slot *slot)
4470 {
4471 struct pci_dev *dev;
4472
4473 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4474 if (!dev->slot || dev->slot != slot)
4475 continue;
4476 if (!pci_dev_trylock(dev))
4477 goto unlock;
4478 if (dev->subordinate) {
4479 if (!pci_bus_trylock(dev->subordinate)) {
4480 pci_dev_unlock(dev);
4481 goto unlock;
4482 }
4483 }
4484 }
4485 return 1;
4486
4487 unlock:
4488 list_for_each_entry_continue_reverse(dev,
4489 &slot->bus->devices, bus_list) {
4490 if (!dev->slot || dev->slot != slot)
4491 continue;
4492 if (dev->subordinate)
4493 pci_bus_unlock(dev->subordinate);
4494 pci_dev_unlock(dev);
4495 }
4496 return 0;
4497 }
4498
4499 /* Save and disable devices from the top of the tree down */
4500 static void pci_bus_save_and_disable(struct pci_bus *bus)
4501 {
4502 struct pci_dev *dev;
4503
4504 list_for_each_entry(dev, &bus->devices, bus_list) {
4505 pci_dev_lock(dev);
4506 pci_dev_save_and_disable(dev);
4507 pci_dev_unlock(dev);
4508 if (dev->subordinate)
4509 pci_bus_save_and_disable(dev->subordinate);
4510 }
4511 }
4512
4513 /*
4514 * Restore devices from top of the tree down - parent bridges need to be
4515 * restored before we can get to subordinate devices.
4516 */
4517 static void pci_bus_restore(struct pci_bus *bus)
4518 {
4519 struct pci_dev *dev;
4520
4521 list_for_each_entry(dev, &bus->devices, bus_list) {
4522 pci_dev_lock(dev);
4523 pci_dev_restore(dev);
4524 pci_dev_unlock(dev);
4525 if (dev->subordinate)
4526 pci_bus_restore(dev->subordinate);
4527 }
4528 }
4529
4530 /* Save and disable devices from the top of the tree down */
4531 static void pci_slot_save_and_disable(struct pci_slot *slot)
4532 {
4533 struct pci_dev *dev;
4534
4535 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4536 if (!dev->slot || dev->slot != slot)
4537 continue;
4538 pci_dev_save_and_disable(dev);
4539 if (dev->subordinate)
4540 pci_bus_save_and_disable(dev->subordinate);
4541 }
4542 }
4543
4544 /*
4545 * Restore devices from top of the tree down - parent bridges need to be
4546 * restored before we can get to subordinate devices.
4547 */
4548 static void pci_slot_restore(struct pci_slot *slot)
4549 {
4550 struct pci_dev *dev;
4551
4552 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4553 if (!dev->slot || dev->slot != slot)
4554 continue;
4555 pci_dev_restore(dev);
4556 if (dev->subordinate)
4557 pci_bus_restore(dev->subordinate);
4558 }
4559 }
4560
4561 static int pci_slot_reset(struct pci_slot *slot, int probe)
4562 {
4563 int rc;
4564
4565 if (!slot || !pci_slot_resetable(slot))
4566 return -ENOTTY;
4567
4568 if (!probe)
4569 pci_slot_lock(slot);
4570
4571 might_sleep();
4572
4573 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4574
4575 if (!probe)
4576 pci_slot_unlock(slot);
4577
4578 return rc;
4579 }
4580
4581 /**
4582 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4583 * @slot: PCI slot to probe
4584 *
4585 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4586 */
4587 int pci_probe_reset_slot(struct pci_slot *slot)
4588 {
4589 return pci_slot_reset(slot, 1);
4590 }
4591 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4592
4593 /**
4594 * pci_reset_slot - reset a PCI slot
4595 * @slot: PCI slot to reset
4596 *
4597 * A PCI bus may host multiple slots, each slot may support a reset mechanism
4598 * independent of other slots. For instance, some slots may support slot power
4599 * control. In the case of a 1:1 bus to slot architecture, this function may
4600 * wrap the bus reset to avoid spurious slot related events such as hotplug.
4601 * Generally a slot reset should be attempted before a bus reset. All of the
4602 * function of the slot and any subordinate buses behind the slot are reset
4603 * through this function. PCI config space of all devices in the slot and
4604 * behind the slot is saved before and restored after reset.
4605 *
4606 * Return 0 on success, non-zero on error.
4607 */
4608 int pci_reset_slot(struct pci_slot *slot)
4609 {
4610 int rc;
4611
4612 rc = pci_slot_reset(slot, 1);
4613 if (rc)
4614 return rc;
4615
4616 pci_slot_save_and_disable(slot);
4617
4618 rc = pci_slot_reset(slot, 0);
4619
4620 pci_slot_restore(slot);
4621
4622 return rc;
4623 }
4624 EXPORT_SYMBOL_GPL(pci_reset_slot);
4625
4626 /**
4627 * pci_try_reset_slot - Try to reset a PCI slot
4628 * @slot: PCI slot to reset
4629 *
4630 * Same as above except return -EAGAIN if the slot cannot be locked
4631 */
4632 int pci_try_reset_slot(struct pci_slot *slot)
4633 {
4634 int rc;
4635
4636 rc = pci_slot_reset(slot, 1);
4637 if (rc)
4638 return rc;
4639
4640 pci_slot_save_and_disable(slot);
4641
4642 if (pci_slot_trylock(slot)) {
4643 might_sleep();
4644 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4645 pci_slot_unlock(slot);
4646 } else
4647 rc = -EAGAIN;
4648
4649 pci_slot_restore(slot);
4650
4651 return rc;
4652 }
4653 EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4654
4655 static int pci_bus_reset(struct pci_bus *bus, int probe)
4656 {
4657 if (!bus->self || !pci_bus_resetable(bus))
4658 return -ENOTTY;
4659
4660 if (probe)
4661 return 0;
4662
4663 pci_bus_lock(bus);
4664
4665 might_sleep();
4666
4667 pci_reset_bridge_secondary_bus(bus->self);
4668
4669 pci_bus_unlock(bus);
4670
4671 return 0;
4672 }
4673
4674 /**
4675 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4676 * @bus: PCI bus to probe
4677 *
4678 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4679 */
4680 int pci_probe_reset_bus(struct pci_bus *bus)
4681 {
4682 return pci_bus_reset(bus, 1);
4683 }
4684 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4685
4686 /**
4687 * pci_reset_bus - reset a PCI bus
4688 * @bus: top level PCI bus to reset
4689 *
4690 * Do a bus reset on the given bus and any subordinate buses, saving
4691 * and restoring state of all devices.
4692 *
4693 * Return 0 on success, non-zero on error.
4694 */
4695 int pci_reset_bus(struct pci_bus *bus)
4696 {
4697 int rc;
4698
4699 rc = pci_bus_reset(bus, 1);
4700 if (rc)
4701 return rc;
4702
4703 pci_bus_save_and_disable(bus);
4704
4705 rc = pci_bus_reset(bus, 0);
4706
4707 pci_bus_restore(bus);
4708
4709 return rc;
4710 }
4711 EXPORT_SYMBOL_GPL(pci_reset_bus);
4712
4713 /**
4714 * pci_try_reset_bus - Try to reset a PCI bus
4715 * @bus: top level PCI bus to reset
4716 *
4717 * Same as above except return -EAGAIN if the bus cannot be locked
4718 */
4719 int pci_try_reset_bus(struct pci_bus *bus)
4720 {
4721 int rc;
4722
4723 rc = pci_bus_reset(bus, 1);
4724 if (rc)
4725 return rc;
4726
4727 pci_bus_save_and_disable(bus);
4728
4729 if (pci_bus_trylock(bus)) {
4730 might_sleep();
4731 pci_reset_bridge_secondary_bus(bus->self);
4732 pci_bus_unlock(bus);
4733 } else
4734 rc = -EAGAIN;
4735
4736 pci_bus_restore(bus);
4737
4738 return rc;
4739 }
4740 EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4741
4742 /**
4743 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4744 * @dev: PCI device to query
4745 *
4746 * Returns mmrbc: maximum designed memory read count in bytes
4747 * or appropriate error value.
4748 */
4749 int pcix_get_max_mmrbc(struct pci_dev *dev)
4750 {
4751 int cap;
4752 u32 stat;
4753
4754 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4755 if (!cap)
4756 return -EINVAL;
4757
4758 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4759 return -EINVAL;
4760
4761 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4762 }
4763 EXPORT_SYMBOL(pcix_get_max_mmrbc);
4764
4765 /**
4766 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4767 * @dev: PCI device to query
4768 *
4769 * Returns mmrbc: maximum memory read count in bytes
4770 * or appropriate error value.
4771 */
4772 int pcix_get_mmrbc(struct pci_dev *dev)
4773 {
4774 int cap;
4775 u16 cmd;
4776
4777 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4778 if (!cap)
4779 return -EINVAL;
4780
4781 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4782 return -EINVAL;
4783
4784 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4785 }
4786 EXPORT_SYMBOL(pcix_get_mmrbc);
4787
4788 /**
4789 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4790 * @dev: PCI device to query
4791 * @mmrbc: maximum memory read count in bytes
4792 * valid values are 512, 1024, 2048, 4096
4793 *
4794 * If possible sets maximum memory read byte count, some bridges have erratas
4795 * that prevent this.
4796 */
4797 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4798 {
4799 int cap;
4800 u32 stat, v, o;
4801 u16 cmd;
4802
4803 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4804 return -EINVAL;
4805
4806 v = ffs(mmrbc) - 10;
4807
4808 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4809 if (!cap)
4810 return -EINVAL;
4811
4812 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4813 return -EINVAL;
4814
4815 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4816 return -E2BIG;
4817
4818 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4819 return -EINVAL;
4820
4821 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4822 if (o != v) {
4823 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4824 return -EIO;
4825
4826 cmd &= ~PCI_X_CMD_MAX_READ;
4827 cmd |= v << 2;
4828 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4829 return -EIO;
4830 }
4831 return 0;
4832 }
4833 EXPORT_SYMBOL(pcix_set_mmrbc);
4834
4835 /**
4836 * pcie_get_readrq - get PCI Express read request size
4837 * @dev: PCI device to query
4838 *
4839 * Returns maximum memory read request in bytes
4840 * or appropriate error value.
4841 */
4842 int pcie_get_readrq(struct pci_dev *dev)
4843 {
4844 u16 ctl;
4845
4846 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4847
4848 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4849 }
4850 EXPORT_SYMBOL(pcie_get_readrq);
4851
4852 /**
4853 * pcie_set_readrq - set PCI Express maximum memory read request
4854 * @dev: PCI device to query
4855 * @rq: maximum memory read count in bytes
4856 * valid values are 128, 256, 512, 1024, 2048, 4096
4857 *
4858 * If possible sets maximum memory read request in bytes
4859 */
4860 int pcie_set_readrq(struct pci_dev *dev, int rq)
4861 {
4862 u16 v;
4863
4864 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4865 return -EINVAL;
4866
4867 /*
4868 * If using the "performance" PCIe config, we clamp the
4869 * read rq size to the max packet size to prevent the
4870 * host bridge generating requests larger than we can
4871 * cope with
4872 */
4873 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4874 int mps = pcie_get_mps(dev);
4875
4876 if (mps < rq)
4877 rq = mps;
4878 }
4879
4880 v = (ffs(rq) - 8) << 12;
4881
4882 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4883 PCI_EXP_DEVCTL_READRQ, v);
4884 }
4885 EXPORT_SYMBOL(pcie_set_readrq);
4886
4887 /**
4888 * pcie_get_mps - get PCI Express maximum payload size
4889 * @dev: PCI device to query
4890 *
4891 * Returns maximum payload size in bytes
4892 */
4893 int pcie_get_mps(struct pci_dev *dev)
4894 {
4895 u16 ctl;
4896
4897 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4898
4899 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4900 }
4901 EXPORT_SYMBOL(pcie_get_mps);
4902
4903 /**
4904 * pcie_set_mps - set PCI Express maximum payload size
4905 * @dev: PCI device to query
4906 * @mps: maximum payload size in bytes
4907 * valid values are 128, 256, 512, 1024, 2048, 4096
4908 *
4909 * If possible sets maximum payload size
4910 */
4911 int pcie_set_mps(struct pci_dev *dev, int mps)
4912 {
4913 u16 v;
4914
4915 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4916 return -EINVAL;
4917
4918 v = ffs(mps) - 8;
4919 if (v > dev->pcie_mpss)
4920 return -EINVAL;
4921 v <<= 5;
4922
4923 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4924 PCI_EXP_DEVCTL_PAYLOAD, v);
4925 }
4926 EXPORT_SYMBOL(pcie_set_mps);
4927
4928 /**
4929 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4930 * @dev: PCI device to query
4931 * @speed: storage for minimum speed
4932 * @width: storage for minimum width
4933 *
4934 * This function will walk up the PCI device chain and determine the minimum
4935 * link width and speed of the device.
4936 */
4937 int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4938 enum pcie_link_width *width)
4939 {
4940 int ret;
4941
4942 *speed = PCI_SPEED_UNKNOWN;
4943 *width = PCIE_LNK_WIDTH_UNKNOWN;
4944
4945 while (dev) {
4946 u16 lnksta;
4947 enum pci_bus_speed next_speed;
4948 enum pcie_link_width next_width;
4949
4950 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4951 if (ret)
4952 return ret;
4953
4954 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4955 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4956 PCI_EXP_LNKSTA_NLW_SHIFT;
4957
4958 if (next_speed < *speed)
4959 *speed = next_speed;
4960
4961 if (next_width < *width)
4962 *width = next_width;
4963
4964 dev = dev->bus->self;
4965 }
4966
4967 return 0;
4968 }
4969 EXPORT_SYMBOL(pcie_get_minimum_link);
4970
4971 /**
4972 * pci_select_bars - Make BAR mask from the type of resource
4973 * @dev: the PCI device for which BAR mask is made
4974 * @flags: resource type mask to be selected
4975 *
4976 * This helper routine makes bar mask from the type of resource.
4977 */
4978 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4979 {
4980 int i, bars = 0;
4981 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4982 if (pci_resource_flags(dev, i) & flags)
4983 bars |= (1 << i);
4984 return bars;
4985 }
4986 EXPORT_SYMBOL(pci_select_bars);
4987
4988 /* Some architectures require additional programming to enable VGA */
4989 static arch_set_vga_state_t arch_set_vga_state;
4990
4991 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4992 {
4993 arch_set_vga_state = func; /* NULL disables */
4994 }
4995
4996 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4997 unsigned int command_bits, u32 flags)
4998 {
4999 if (arch_set_vga_state)
5000 return arch_set_vga_state(dev, decode, command_bits,
5001 flags);
5002 return 0;
5003 }
5004
5005 /**
5006 * pci_set_vga_state - set VGA decode state on device and parents if requested
5007 * @dev: the PCI device
5008 * @decode: true = enable decoding, false = disable decoding
5009 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
5010 * @flags: traverse ancestors and change bridges
5011 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
5012 */
5013 int pci_set_vga_state(struct pci_dev *dev, bool decode,
5014 unsigned int command_bits, u32 flags)
5015 {
5016 struct pci_bus *bus;
5017 struct pci_dev *bridge;
5018 u16 cmd;
5019 int rc;
5020
5021 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5022
5023 /* ARCH specific VGA enables */
5024 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5025 if (rc)
5026 return rc;
5027
5028 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5029 pci_read_config_word(dev, PCI_COMMAND, &cmd);
5030 if (decode == true)
5031 cmd |= command_bits;
5032 else
5033 cmd &= ~command_bits;
5034 pci_write_config_word(dev, PCI_COMMAND, cmd);
5035 }
5036
5037 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5038 return 0;
5039
5040 bus = dev->bus;
5041 while (bus) {
5042 bridge = bus->self;
5043 if (bridge) {
5044 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5045 &cmd);
5046 if (decode == true)
5047 cmd |= PCI_BRIDGE_CTL_VGA;
5048 else
5049 cmd &= ~PCI_BRIDGE_CTL_VGA;
5050 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5051 cmd);
5052 }
5053 bus = bus->parent;
5054 }
5055 return 0;
5056 }
5057
5058 /**
5059 * pci_add_dma_alias - Add a DMA devfn alias for a device
5060 * @dev: the PCI device for which alias is added
5061 * @devfn: alias slot and function
5062 *
5063 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
5064 * It should be called early, preferably as PCI fixup header quirk.
5065 */
5066 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5067 {
5068 if (!dev->dma_alias_mask)
5069 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5070 sizeof(long), GFP_KERNEL);
5071 if (!dev->dma_alias_mask) {
5072 dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
5073 return;
5074 }
5075
5076 set_bit(devfn, dev->dma_alias_mask);
5077 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
5078 PCI_SLOT(devfn), PCI_FUNC(devfn));
5079 }
5080
5081 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5082 {
5083 return (dev1->dma_alias_mask &&
5084 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5085 (dev2->dma_alias_mask &&
5086 test_bit(dev1->devfn, dev2->dma_alias_mask));
5087 }
5088
5089 bool pci_device_is_present(struct pci_dev *pdev)
5090 {
5091 u32 v;
5092
5093 if (pci_dev_is_disconnected(pdev))
5094 return false;
5095 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5096 }
5097 EXPORT_SYMBOL_GPL(pci_device_is_present);
5098
5099 void pci_ignore_hotplug(struct pci_dev *dev)
5100 {
5101 struct pci_dev *bridge = dev->bus->self;
5102
5103 dev->ignore_hotplug = 1;
5104 /* Propagate the "ignore hotplug" setting to the parent bridge. */
5105 if (bridge)
5106 bridge->ignore_hotplug = 1;
5107 }
5108 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5109
5110 resource_size_t __weak pcibios_default_alignment(void)
5111 {
5112 return 0;
5113 }
5114
5115 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5116 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5117 static DEFINE_SPINLOCK(resource_alignment_lock);
5118
5119 /**
5120 * pci_specified_resource_alignment - get resource alignment specified by user.
5121 * @dev: the PCI device to get
5122 * @resize: whether or not to change resources' size when reassigning alignment
5123 *
5124 * RETURNS: Resource alignment if it is specified.
5125 * Zero if it is not specified.
5126 */
5127 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5128 bool *resize)
5129 {
5130 int seg, bus, slot, func, align_order, count;
5131 unsigned short vendor, device, subsystem_vendor, subsystem_device;
5132 resource_size_t align = pcibios_default_alignment();
5133 char *p;
5134
5135 spin_lock(&resource_alignment_lock);
5136 p = resource_alignment_param;
5137 if (!*p && !align)
5138 goto out;
5139 if (pci_has_flag(PCI_PROBE_ONLY)) {
5140 align = 0;
5141 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5142 goto out;
5143 }
5144
5145 while (*p) {
5146 count = 0;
5147 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5148 p[count] == '@') {
5149 p += count + 1;
5150 } else {
5151 align_order = -1;
5152 }
5153 if (strncmp(p, "pci:", 4) == 0) {
5154 /* PCI vendor/device (subvendor/subdevice) ids are specified */
5155 p += 4;
5156 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5157 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5158 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5159 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5160 p);
5161 break;
5162 }
5163 subsystem_vendor = subsystem_device = 0;
5164 }
5165 p += count;
5166 if ((!vendor || (vendor == dev->vendor)) &&
5167 (!device || (device == dev->device)) &&
5168 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5169 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5170 *resize = true;
5171 if (align_order == -1)
5172 align = PAGE_SIZE;
5173 else
5174 align = 1 << align_order;
5175 /* Found */
5176 break;
5177 }
5178 }
5179 else {
5180 if (sscanf(p, "%x:%x:%x.%x%n",
5181 &seg, &bus, &slot, &func, &count) != 4) {
5182 seg = 0;
5183 if (sscanf(p, "%x:%x.%x%n",
5184 &bus, &slot, &func, &count) != 3) {
5185 /* Invalid format */
5186 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5187 p);
5188 break;
5189 }
5190 }
5191 p += count;
5192 if (seg == pci_domain_nr(dev->bus) &&
5193 bus == dev->bus->number &&
5194 slot == PCI_SLOT(dev->devfn) &&
5195 func == PCI_FUNC(dev->devfn)) {
5196 *resize = true;
5197 if (align_order == -1)
5198 align = PAGE_SIZE;
5199 else
5200 align = 1 << align_order;
5201 /* Found */
5202 break;
5203 }
5204 }
5205 if (*p != ';' && *p != ',') {
5206 /* End of param or invalid format */
5207 break;
5208 }
5209 p++;
5210 }
5211 out:
5212 spin_unlock(&resource_alignment_lock);
5213 return align;
5214 }
5215
5216 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5217 resource_size_t align, bool resize)
5218 {
5219 struct resource *r = &dev->resource[bar];
5220 resource_size_t size;
5221
5222 if (!(r->flags & IORESOURCE_MEM))
5223 return;
5224
5225 if (r->flags & IORESOURCE_PCI_FIXED) {
5226 dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5227 bar, r, (unsigned long long)align);
5228 return;
5229 }
5230
5231 size = resource_size(r);
5232 if (size >= align)
5233 return;
5234
5235 /*
5236 * Increase the alignment of the resource. There are two ways we
5237 * can do this:
5238 *
5239 * 1) Increase the size of the resource. BARs are aligned on their
5240 * size, so when we reallocate space for this resource, we'll
5241 * allocate it with the larger alignment. This also prevents
5242 * assignment of any other BARs inside the alignment region, so
5243 * if we're requesting page alignment, this means no other BARs
5244 * will share the page.
5245 *
5246 * The disadvantage is that this makes the resource larger than
5247 * the hardware BAR, which may break drivers that compute things
5248 * based on the resource size, e.g., to find registers at a
5249 * fixed offset before the end of the BAR.
5250 *
5251 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
5252 * set r->start to the desired alignment. By itself this
5253 * doesn't prevent other BARs being put inside the alignment
5254 * region, but if we realign *every* resource of every device in
5255 * the system, none of them will share an alignment region.
5256 *
5257 * When the user has requested alignment for only some devices via
5258 * the "pci=resource_alignment" argument, "resize" is true and we
5259 * use the first method. Otherwise we assume we're aligning all
5260 * devices and we use the second.
5261 */
5262
5263 dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
5264 bar, r, (unsigned long long)align);
5265
5266 if (resize) {
5267 r->start = 0;
5268 r->end = align - 1;
5269 } else {
5270 r->flags &= ~IORESOURCE_SIZEALIGN;
5271 r->flags |= IORESOURCE_STARTALIGN;
5272 r->start = align;
5273 r->end = r->start + size - 1;
5274 }
5275 r->flags |= IORESOURCE_UNSET;
5276 }
5277
5278 /*
5279 * This function disables memory decoding and releases memory resources
5280 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
5281 * It also rounds up size to specified alignment.
5282 * Later on, the kernel will assign page-aligned memory resource back
5283 * to the device.
5284 */
5285 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5286 {
5287 int i;
5288 struct resource *r;
5289 resource_size_t align;
5290 u16 command;
5291 bool resize = false;
5292
5293 /*
5294 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5295 * 3.4.1.11. Their resources are allocated from the space
5296 * described by the VF BARx register in the PF's SR-IOV capability.
5297 * We can't influence their alignment here.
5298 */
5299 if (dev->is_virtfn)
5300 return;
5301
5302 /* check if specified PCI is target device to reassign */
5303 align = pci_specified_resource_alignment(dev, &resize);
5304 if (!align)
5305 return;
5306
5307 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5308 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5309 dev_warn(&dev->dev,
5310 "Can't reassign resources to host bridge.\n");
5311 return;
5312 }
5313
5314 dev_info(&dev->dev,
5315 "Disabling memory decoding and releasing memory resources.\n");
5316 pci_read_config_word(dev, PCI_COMMAND, &command);
5317 command &= ~PCI_COMMAND_MEMORY;
5318 pci_write_config_word(dev, PCI_COMMAND, command);
5319
5320 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5321 pci_request_resource_alignment(dev, i, align, resize);
5322
5323 /*
5324 * Need to disable bridge's resource window,
5325 * to enable the kernel to reassign new resource
5326 * window later on.
5327 */
5328 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5329 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5330 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5331 r = &dev->resource[i];
5332 if (!(r->flags & IORESOURCE_MEM))
5333 continue;
5334 r->flags |= IORESOURCE_UNSET;
5335 r->end = resource_size(r) - 1;
5336 r->start = 0;
5337 }
5338 pci_disable_bridge_window(dev);
5339 }
5340 }
5341
5342 static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5343 {
5344 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5345 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5346 spin_lock(&resource_alignment_lock);
5347 strncpy(resource_alignment_param, buf, count);
5348 resource_alignment_param[count] = '\0';
5349 spin_unlock(&resource_alignment_lock);
5350 return count;
5351 }
5352
5353 static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5354 {
5355 size_t count;
5356 spin_lock(&resource_alignment_lock);
5357 count = snprintf(buf, size, "%s", resource_alignment_param);
5358 spin_unlock(&resource_alignment_lock);
5359 return count;
5360 }
5361
5362 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5363 {
5364 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5365 }
5366
5367 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5368 const char *buf, size_t count)
5369 {
5370 return pci_set_resource_alignment_param(buf, count);
5371 }
5372
5373 static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5374 pci_resource_alignment_store);
5375
5376 static int __init pci_resource_alignment_sysfs_init(void)
5377 {
5378 return bus_create_file(&pci_bus_type,
5379 &bus_attr_resource_alignment);
5380 }
5381 late_initcall(pci_resource_alignment_sysfs_init);
5382
5383 static void pci_no_domains(void)
5384 {
5385 #ifdef CONFIG_PCI_DOMAINS
5386 pci_domains_supported = 0;
5387 #endif
5388 }
5389
5390 #ifdef CONFIG_PCI_DOMAINS
5391 static atomic_t __domain_nr = ATOMIC_INIT(-1);
5392
5393 int pci_get_new_domain_nr(void)
5394 {
5395 return atomic_inc_return(&__domain_nr);
5396 }
5397
5398 #ifdef CONFIG_PCI_DOMAINS_GENERIC
5399 static int of_pci_bus_find_domain_nr(struct device *parent)
5400 {
5401 static int use_dt_domains = -1;
5402 int domain = -1;
5403
5404 if (parent)
5405 domain = of_get_pci_domain_nr(parent->of_node);
5406 /*
5407 * Check DT domain and use_dt_domains values.
5408 *
5409 * If DT domain property is valid (domain >= 0) and
5410 * use_dt_domains != 0, the DT assignment is valid since this means
5411 * we have not previously allocated a domain number by using
5412 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5413 * 1, to indicate that we have just assigned a domain number from
5414 * DT.
5415 *
5416 * If DT domain property value is not valid (ie domain < 0), and we
5417 * have not previously assigned a domain number from DT
5418 * (use_dt_domains != 1) we should assign a domain number by
5419 * using the:
5420 *
5421 * pci_get_new_domain_nr()
5422 *
5423 * API and update the use_dt_domains value to keep track of method we
5424 * are using to assign domain numbers (use_dt_domains = 0).
5425 *
5426 * All other combinations imply we have a platform that is trying
5427 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5428 * which is a recipe for domain mishandling and it is prevented by
5429 * invalidating the domain value (domain = -1) and printing a
5430 * corresponding error.
5431 */
5432 if (domain >= 0 && use_dt_domains) {
5433 use_dt_domains = 1;
5434 } else if (domain < 0 && use_dt_domains != 1) {
5435 use_dt_domains = 0;
5436 domain = pci_get_new_domain_nr();
5437 } else {
5438 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
5439 parent->of_node->full_name);
5440 domain = -1;
5441 }
5442
5443 return domain;
5444 }
5445
5446 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5447 {
5448 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5449 acpi_pci_bus_find_domain_nr(bus);
5450 }
5451 #endif
5452 #endif
5453
5454 /**
5455 * pci_ext_cfg_avail - can we access extended PCI config space?
5456 *
5457 * Returns 1 if we can access PCI extended config space (offsets
5458 * greater than 0xff). This is the default implementation. Architecture
5459 * implementations can override this.
5460 */
5461 int __weak pci_ext_cfg_avail(void)
5462 {
5463 return 1;
5464 }
5465
5466 void __weak pci_fixup_cardbus(struct pci_bus *bus)
5467 {
5468 }
5469 EXPORT_SYMBOL(pci_fixup_cardbus);
5470
5471 static int __init pci_setup(char *str)
5472 {
5473 while (str) {
5474 char *k = strchr(str, ',');
5475 if (k)
5476 *k++ = 0;
5477 if (*str && (str = pcibios_setup(str)) && *str) {
5478 if (!strcmp(str, "nomsi")) {
5479 pci_no_msi();
5480 } else if (!strcmp(str, "noaer")) {
5481 pci_no_aer();
5482 } else if (!strncmp(str, "realloc=", 8)) {
5483 pci_realloc_get_opt(str + 8);
5484 } else if (!strncmp(str, "realloc", 7)) {
5485 pci_realloc_get_opt("on");
5486 } else if (!strcmp(str, "nodomains")) {
5487 pci_no_domains();
5488 } else if (!strncmp(str, "noari", 5)) {
5489 pcie_ari_disabled = true;
5490 } else if (!strncmp(str, "cbiosize=", 9)) {
5491 pci_cardbus_io_size = memparse(str + 9, &str);
5492 } else if (!strncmp(str, "cbmemsize=", 10)) {
5493 pci_cardbus_mem_size = memparse(str + 10, &str);
5494 } else if (!strncmp(str, "resource_alignment=", 19)) {
5495 pci_set_resource_alignment_param(str + 19,
5496 strlen(str + 19));
5497 } else if (!strncmp(str, "ecrc=", 5)) {
5498 pcie_ecrc_get_policy(str + 5);
5499 } else if (!strncmp(str, "hpiosize=", 9)) {
5500 pci_hotplug_io_size = memparse(str + 9, &str);
5501 } else if (!strncmp(str, "hpmemsize=", 10)) {
5502 pci_hotplug_mem_size = memparse(str + 10, &str);
5503 } else if (!strncmp(str, "hpbussize=", 10)) {
5504 pci_hotplug_bus_size =
5505 simple_strtoul(str + 10, &str, 0);
5506 if (pci_hotplug_bus_size > 0xff)
5507 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5508 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5509 pcie_bus_config = PCIE_BUS_TUNE_OFF;
5510 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5511 pcie_bus_config = PCIE_BUS_SAFE;
5512 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5513 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5514 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5515 pcie_bus_config = PCIE_BUS_PEER2PEER;
5516 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5517 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5518 } else {
5519 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5520 str);
5521 }
5522 }
5523 str = k;
5524 }
5525 return 0;
5526 }
5527 early_param("pci", pci_setup);