/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
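
/*
 * Illustrative sketch (hypothetical caller, not part of this file; "foo"
 * names are assumptions): walk up to a device's root bus and report the
 * highest bus number reachable below it via pci_bus_max_busnr().
 */
#if 0
static void foo_report_max_busnr(struct pci_dev *pdev)
{
	struct pci_bus *root = pdev->bus;

	while (root->parent)
		root = root->parent;

	dev_info(&pdev->dev, "highest bus below root: %02x\n",
		 pci_bus_max_busnr(root));
}
#endif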

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
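
/*
 * Illustrative sketch (hypothetical driver helper, assumed names): map a
 * device's BAR 0 in a probe path.  pci_ioremap_bar() succeeds only for
 * memory BARs, as enforced above.
 */
#if 0
static int foo_map_regs(struct pci_dev *pdev, void __iomem **regs)
{
	*regs = pci_ioremap_bar(pdev, 0);	/* whole of BAR 0 */
	if (!*regs)
		return -ENOMEM;
	return 0;
}
#endif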

#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
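
/*
 * Illustrative sketch (hypothetical helper, assumed names): locate the PCI
 * PM capability with pci_find_capability() and read its PMC register.  A
 * return of 0 from the lookup means the capability is absent.
 */
#if 0
static int foo_read_pmc(struct pci_dev *pdev, u16 *pmc)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);

	if (!pos)
		return -ENODEV;	/* device has no PM capability */
	return pci_read_config_word(pdev, pos + PCI_PM_PMC, pmc);
}
#endif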

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
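
/*
 * Illustrative sketch (hypothetical predicate, assumed name): check whether
 * a device advertises Advanced Error Reporting by looking for the AER
 * extended capability.
 */
#if 0
static bool foo_has_aer(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif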

/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * we can enter D0 from any state, but we can only go deeper
	 * into a sleep state if we're already in a low-power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
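
/*
 * Illustrative sketch (hypothetical legacy suspend handler, assumed names):
 * save config space, quiesce the device, then enter the power state that
 * pci_choose_state() suggests for the system sleep transition.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);		/* snapshot config space */
	pci_disable_device(pdev);	/* stop bus mastering */
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif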

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return;

	/* PCI Express registers must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s)
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
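
/*
 * Illustrative sketch (hypothetical legacy resume handler, assumed names):
 * the counterpart of the suspend sketch above -- return to D0, restore the
 * saved config space, then re-enable the device.
 */
#if 0
static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* consumes the saved snapshot */
	return pci_enable_device(pdev);
}
#endif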

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
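
/*
 * Illustrative sketch (assumed names): snapshot a device's saved state
 * before handing the device to a consumer that may clobber it, then reload
 * and restore afterwards -- the pattern device-assignment code relies on.
 */
#if 0
static void foo_state_roundtrip(struct pci_dev *pdev)
{
	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* opaque private copy */

	/* ... device is used, possibly reset, elsewhere ... */

	pci_load_and_free_saved_state(pdev, &state);
	pci_restore_state(pdev);
}
#endif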

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
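
/*
 * Illustrative sketch (hypothetical probe, assumed names): enable the
 * device, claim its regions, and undo the enable on failure -- the classic
 * non-managed pattern that pci_disable_device() below pairs with.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		pci_disable_device(pdev);	/* balance the enable */
	return err;
}
#endif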

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
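
/*
 * Illustrative sketch (assumed names): with the managed variant, devres
 * runs pcim_release() on driver detach, so the probe needs no explicit
 * pci_disable_device() on its error paths.
 */
#if 0
static int foo_managed_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;
	/* no matching pci_disable_device() required; devres handles it */
	return 0;
}
#endif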

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device.  This
 * is the default implementation.  Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @ign: Ignored.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry(pme_dev, &pci_pme_list, list)
			pci_pme_wakeup(pme_dev->dev, NULL);
		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_external_pme - is a device an external PCI PME source?
 * @dev: PCI device to check
 */
static bool pci_external_pme(struct pci_dev *dev)
{
	if (pci_is_pcie(dev) || dev->bus->number == 0)
		return false;
	return true;
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly.  Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep.  The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set.  The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (pci_external_pme(dev)) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}
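
/*
 * Illustrative sketch (assumed name): per the kernel-doc above, verify the
 * device can generate PME# from the target state before enabling it.
 */
#if 0
static void foo_arm_pme(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);	/* safe: capability checked */
}
#endif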

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
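
/*
 * Illustrative sketch (hypothetical NIC suspend path, assumed names): arm
 * wake-up from D3 only when Wake-on-LAN is requested, then enter D3hot.
 */
#if 0
static int foo_suspend_with_wol(struct pci_dev *pdev, bool wol)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, wol);	/* picks D3cold if PME-capable there */
	return pci_set_power_state(pdev, PCI_D3hot);
}
#endif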

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
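
/*
 * Illustrative sketch (assumed name): pci_prepare_to_sleep() bundles the
 * target-state choice, wake-up arming and the state change, so a suspend
 * handler can reduce to a config-space save plus this single call.
 */
#if 0
static int foo_suspend_simple(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);
}
#endif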
1694
1695 /**
1696 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1697 * @dev: Device to handle.
1698 *
1699 * Disable device's system wake-up capability and put it into D0.
1700 */
1701 int pci_back_from_sleep(struct pci_dev *dev)
1702 {
1703 pci_enable_wake(dev, PCI_D0, false);
1704 return pci_set_power_state(dev, PCI_D0);
1705 }
1706
1707 /**
1708 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1709 * @dev: PCI device being suspended.
1710 *
1711 * Prepare @dev to generate wake-up events at run time and put it into a low
1712 * power state.
1713 */
1714 int pci_finish_runtime_suspend(struct pci_dev *dev)
1715 {
1716 pci_power_t target_state = pci_target_state(dev);
1717 int error;
1718
1719 if (target_state == PCI_POWER_ERROR)
1720 return -EIO;
1721
1722 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1723
1724 error = pci_set_power_state(dev, target_state);
1725
1726 if (error)
1727 __pci_enable_wake(dev, target_state, true, false);
1728
1729 return error;
1730 }
1731
1732 /**
1733 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1734 * @dev: Device to check.
1735 *
1736 * Return true if the device itself is cabable of generating wake-up events
1737 * (through the platform or using the native PCIe PME) or if the device supports
1738 * PME and one of its upstream bridges can generate wake-up events.
1739 */
1740 bool pci_dev_run_wake(struct pci_dev *dev)
1741 {
1742 struct pci_bus *bus = dev->bus;
1743
1744 if (device_run_wake(&dev->dev))
1745 return true;
1746
1747 if (!dev->pme_support)
1748 return false;
1749
1750 while (bus->parent) {
1751 struct pci_dev *bridge = bus->self;
1752
1753 if (device_run_wake(&bridge->dev))
1754 return true;
1755
1756 bus = bus->parent;
1757 }
1758
1759 /* We have reached the root bus. */
1760 if (bus->bridge)
1761 return device_run_wake(bus->bridge);
1762
1763 return false;
1764 }
1765 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1766
1767 /**
1768 * pci_pm_init - Initialize PM functions of given PCI device
1769 * @dev: PCI device to handle.
1770 */
1771 void pci_pm_init(struct pci_dev *dev)
1772 {
1773 int pm;
1774 u16 pmc;
1775
1776 pm_runtime_forbid(&dev->dev);
1777 device_enable_async_suspend(&dev->dev);
1778 dev->wakeup_prepared = false;
1779
1780 dev->pm_cap = 0;
1781
1782 /* find PCI PM capability in list */
1783 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1784 if (!pm)
1785 return;
1786 /* Check device's ability to generate PME# */
1787 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1788
1789 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1790 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1791 pmc & PCI_PM_CAP_VER_MASK);
1792 return;
1793 }
1794
1795 dev->pm_cap = pm;
1796 dev->d3_delay = PCI_PM_D3_WAIT;
1797
1798 dev->d1_support = false;
1799 dev->d2_support = false;
1800 if (!pci_no_d1d2(dev)) {
1801 if (pmc & PCI_PM_CAP_D1)
1802 dev->d1_support = true;
1803 if (pmc & PCI_PM_CAP_D2)
1804 dev->d2_support = true;
1805
1806 if (dev->d1_support || dev->d2_support)
1807 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1808 dev->d1_support ? " D1" : "",
1809 dev->d2_support ? " D2" : "");
1810 }
1811
1812 pmc &= PCI_PM_CAP_PME_MASK;
1813 if (pmc) {
1814 dev_printk(KERN_DEBUG, &dev->dev,
1815 "PME# supported from%s%s%s%s%s\n",
1816 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1817 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1818 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1819 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1820 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1821 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1822 /*
1823 * Make device's PM flags reflect the wake-up capability, but
1824 * let the user space enable it to wake up the system as needed.
1825 */
1826 device_set_wakeup_capable(&dev->dev, true);
1827 /* Disable the PME# generation functionality */
1828 pci_pme_active(dev, false);
1829 } else {
1830 dev->pme_support = 0;
1831 }
1832 }
1833
1834 /**
1835 * platform_pci_wakeup_init - init platform wakeup if present
1836 * @dev: PCI device
1837 *
1838 * Some devices don't have PCI PM caps but can still generate wakeup
1839 * events through platform methods (like ACPI events). If @dev supports
1840 * platform wakeup events, set the device flag to indicate as much. This
1841 * may be redundant if the device also supports PCI PM caps, but double
1842 * initialization should be safe in that case.
1843 */
1844 void platform_pci_wakeup_init(struct pci_dev *dev)
1845 {
1846 if (!platform_pci_can_wakeup(dev))
1847 return;
1848
1849 device_set_wakeup_capable(&dev->dev, true);
1850 platform_pci_sleep_wake(dev, false);
1851 }
1852
1853 /**
1854 * pci_add_save_buffer - allocate buffer for saving given capability registers
1855 * @dev: the PCI device
1856 * @cap: the capability to allocate the buffer for
1857 * @size: requested size of the buffer
1858 */
1859 static int pci_add_cap_save_buffer(
1860 struct pci_dev *dev, char cap, unsigned int size)
1861 {
1862 int pos;
1863 struct pci_cap_saved_state *save_state;
1864
1865 pos = pci_find_capability(dev, cap);
1866 if (pos <= 0)
1867 return 0;
1868
1869 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1870 if (!save_state)
1871 return -ENOMEM;
1872
1873 save_state->cap.cap_nr = cap;
1874 save_state->cap.size = size;
1875 pci_add_saved_cap(dev, save_state);
1876
1877 return 0;
1878 }
1879
1880 /**
1881 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1882 * @dev: the PCI device
1883 */
1884 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1885 {
1886 int error;
1887
1888 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1889 PCI_EXP_SAVE_REGS * sizeof(u16));
1890 if (error)
1891 dev_err(&dev->dev,
1892 "unable to preallocate PCI Express save buffer\n");
1893
1894 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1895 if (error)
1896 dev_err(&dev->dev,
1897 "unable to preallocate PCI-X save buffer\n");
1898 }
1899
1900 /**
1901 * pci_enable_ari - enable ARI forwarding if hardware supports it
1902 * @dev: the PCI device
1903 */
1904 void pci_enable_ari(struct pci_dev *dev)
1905 {
1906 int pos;
1907 u32 cap;
1908 u16 ctrl;
1909 struct pci_dev *bridge;
1910
1911 if (!pci_is_pcie(dev) || dev->devfn)
1912 return;
1913
1914 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1915 if (!pos)
1916 return;
1917
1918 bridge = dev->bus->self;
1919 if (!bridge || !pci_is_pcie(bridge))
1920 return;
1921
1922 pos = pci_pcie_cap(bridge);
1923 if (!pos)
1924 return;
1925
1926 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1927 if (!(cap & PCI_EXP_DEVCAP2_ARI))
1928 return;
1929
1930 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1931 ctrl |= PCI_EXP_DEVCTL2_ARI;
1932 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1933
1934 bridge->ari_enabled = 1;
1935 }
1936
1937 /**
1938 * pci_enable_ido - enable ID-based ordering on a device
1939 * @dev: the PCI device
1940 * @type: which types of IDO to enable
1941 *
1942 * Enable ID-based ordering on @dev. @type can contain the bits
1943 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1944 * which types of transactions are allowed to be re-ordered.
1945 */
1946 void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1947 {
1948 int pos;
1949 u16 ctrl;
1950
1951 pos = pci_pcie_cap(dev);
1952 if (!pos)
1953 return;
1954
1955 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1956 if (type & PCI_EXP_IDO_REQUEST)
1957 ctrl |= PCI_EXP_IDO_REQ_EN;
1958 if (type & PCI_EXP_IDO_COMPLETION)
1959 ctrl |= PCI_EXP_IDO_CMP_EN;
1960 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1961 }
1962 EXPORT_SYMBOL(pci_enable_ido);
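/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a hypothetical driver whose DMA streams are independent of each other
 * could opt in to ID-based ordering for both request and completion
 * TLPs from its probe path. "foo_enable_ido" is an invented name.
 */
static void __maybe_unused foo_enable_ido(struct pci_dev *pdev)
{
	/* No return value: silently a no-op without PCIe/IDO support */
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}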
1963
1964 /**
1965 * pci_disable_ido - disable ID-based ordering on a device
1966 * @dev: the PCI device
1967 * @type: which types of IDO to disable
1968 */
1969 void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1970 {
1971 int pos;
1972 u16 ctrl;
1973
1974 if (!pci_is_pcie(dev))
1975 return;
1976
1977 pos = pci_pcie_cap(dev);
1978 if (!pos)
1979 return;
1980
1981 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1982 if (type & PCI_EXP_IDO_REQUEST)
1983 ctrl &= ~PCI_EXP_IDO_REQ_EN;
1984 if (type & PCI_EXP_IDO_COMPLETION)
1985 ctrl &= ~PCI_EXP_IDO_CMP_EN;
1986 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1987 }
1988 EXPORT_SYMBOL(pci_disable_ido);
1989
1990 /**
1991 * pci_enable_obff - enable optimized buffer flush/fill
1992 * @dev: PCI device
1993 * @type: type of signaling to use
1994 *
1995 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
1996 * signaling if possible, falling back to message signaling only if
1997 * WAKE# isn't supported. @type should indicate whether the PCIe link
1998 * should be brought out of L0s or L1 to send the message. It should be
1999 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2000 *
2001 * If your device can benefit from receiving all messages, even at the
2002 * power cost of bringing the link back up from a low power state, use
2003 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2004 * preferred type).
2005 *
2006 * RETURNS:
2007 * Zero on success, appropriate error number on failure.
2008 */
2009 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2010 {
2011 int pos;
2012 u32 cap;
2013 u16 ctrl;
2014 int ret;
2015
2016 if (!pci_is_pcie(dev))
2017 return -ENOTSUPP;
2018
2019 pos = pci_pcie_cap(dev);
2020 if (!pos)
2021 return -ENOTSUPP;
2022
2023 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2024 if (!(cap & PCI_EXP_OBFF_MASK))
2025 return -ENOTSUPP; /* no OBFF support at all */
2026
2027 /* Make sure the topology supports OBFF as well */
2028 if (dev->bus && dev->bus->self) {
2029 ret = pci_enable_obff(dev->bus->self, type);
2030 if (ret)
2031 return ret;
2032 }
2033
2034 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2035 if (cap & PCI_EXP_OBFF_WAKE)
2036 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2037 else {
2038 switch (type) {
2039 case PCI_EXP_OBFF_SIGNAL_L0:
2040 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2041 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2042 break;
2043 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2044 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2045 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2046 break;
2047 default:
2048 WARN(1, "bad OBFF signal type\n");
2049 return -ENOTSUPP;
2050 }
2051 }
2052 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2053
2054 return 0;
2055 }
2056 EXPORT_SYMBOL(pci_enable_obff);
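/*
 * Usage sketch (editor's illustration, "foo_enable_obff" is invented):
 * prefer the low-power L0 variant; -ENOTSUPP just means the device or
 * an upstream component lacks OBFF, which a driver can safely ignore.
 */
static void __maybe_unused foo_enable_obff(struct pci_dev *pdev)
{
	int ret = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (ret)
		dev_info(&pdev->dev, "OBFF not enabled: %d\n", ret);
}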
2057
2058 /**
2059 * pci_disable_obff - disable optimized buffer flush/fill
2060 * @dev: PCI device
2061 *
2062 * Disable OBFF on @dev.
2063 */
2064 void pci_disable_obff(struct pci_dev *dev)
2065 {
2066 int pos;
2067 u16 ctrl;
2068
2069 if (!pci_is_pcie(dev))
2070 return;
2071
2072 pos = pci_pcie_cap(dev);
2073 if (!pos)
2074 return;
2075
2076 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2077 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2078 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2079 }
2080 EXPORT_SYMBOL(pci_disable_obff);
2081
2082 /**
2083 * pci_ltr_supported - check whether a device supports LTR
2084 * @dev: PCI device
2085 *
2086 * RETURNS:
2087 * True if @dev supports latency tolerance reporting, false otherwise.
2088 */
2089 bool pci_ltr_supported(struct pci_dev *dev)
2090 {
2091 int pos;
2092 u32 cap;
2093
2094 if (!pci_is_pcie(dev))
2095 return false;
2096
2097 pos = pci_pcie_cap(dev);
2098 if (!pos)
2099 return false;
2100
2101 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2102
2103 return cap & PCI_EXP_DEVCAP2_LTR;
2104 }
2105 EXPORT_SYMBOL(pci_ltr_supported);
2106
2107 /**
2108 * pci_enable_ltr - enable latency tolerance reporting
2109 * @dev: PCI device
2110 *
2111 * Enable LTR on @dev if possible, which means enabling it first on
2112 * upstream ports.
2113 *
2114 * RETURNS:
2115 * Zero on success, errno on failure.
2116 */
2117 int pci_enable_ltr(struct pci_dev *dev)
2118 {
2119 int pos;
2120 u16 ctrl;
2121 int ret;
2122
2123 if (!pci_ltr_supported(dev))
2124 return -ENOTSUPP;
2125
2126 pos = pci_pcie_cap(dev);
2127 if (!pos)
2128 return -ENOTSUPP;
2129
2130 /* Only primary function can enable/disable LTR */
2131 if (PCI_FUNC(dev->devfn) != 0)
2132 return -EINVAL;
2133
2134 /* Enable upstream ports first */
2135 if (dev->bus && dev->bus->self) {
2136 ret = pci_enable_ltr(dev->bus->self);
2137 if (ret)
2138 return ret;
2139 }
2140
2141 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2142 ctrl |= PCI_EXP_LTR_EN;
2143 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2144
2145 return 0;
2146 }
2147 EXPORT_SYMBOL(pci_enable_ltr);
2148
2149 /**
2150 * pci_disable_ltr - disable latency tolerance reporting
2151 * @dev: PCI device
2152 */
2153 void pci_disable_ltr(struct pci_dev *dev)
2154 {
2155 int pos;
2156 u16 ctrl;
2157
2158 if (!pci_ltr_supported(dev))
2159 return;
2160
2161 pos = pci_pcie_cap(dev);
2162 if (!pos)
2163 return;
2164
2165 /* Only primary function can enable/disable LTR */
2166 if (PCI_FUNC(dev->devfn) != 0)
2167 return;
2168
2169 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2170 ctrl &= ~PCI_EXP_LTR_EN;
2171 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2172 }
2173 EXPORT_SYMBOL(pci_disable_ltr);
2174
2175 static int __pci_ltr_scale(int *val)
2176 {
2177 int scale = 0;
2178
2179 while (*val > 1023) {
2180 *val = (*val + 31) / 32;
2181 scale++;
2182 }
2183 return scale;
2184 }
2185
2186 /**
2187 * pci_set_ltr - set LTR latency values
2188 * @dev: PCI device
2189 * @snoop_lat_ns: snoop latency in nanoseconds
2190 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2191 *
2192 * Figure out the scale and set the LTR values accordingly.
2193 */
2194 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2195 {
2196 int pos, ret, snoop_scale, nosnoop_scale;
2197 u16 val;
2198
2199 if (!pci_ltr_supported(dev))
2200 return -ENOTSUPP;
2201
2202 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2203 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2204
2205 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2206 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2207 return -EINVAL;
2208
2209 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2210 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2211 return -EINVAL;
2212
2213 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2214 if (!pos)
2215 return -ENOTSUPP;
2216
2217 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2218 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2219 if (ret)
2220 return -EIO;
2221
2222 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2223 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2224 if (ret)
2225 return -EIO;
2226
2227 return 0;
2228 }
2229 EXPORT_SYMBOL(pci_set_ltr);
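/*
 * Worked example (editor's note): __pci_ltr_scale() encodes a latency
 * as value * 32^scale. For 1024 ns: 1024 > 1023, so val becomes
 * (1024 + 31) / 32 = 32 with scale = 1, and 32 * 32 = 1024 ns exactly.
 * A hypothetical caller ("foo_ltr_setup" is invented) might do:
 */
static int __maybe_unused foo_ltr_setup(struct pci_dev *pdev)
{
	int ret = pci_enable_ltr(pdev);

	if (ret)
		return ret;

	/* 1024 ns snoop, 4096 ns no-snoop; both encode exactly */
	return pci_set_ltr(pdev, 1024, 4096);
}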
2230
2231 static int pci_acs_enable;
2232
2233 /**
2234 * pci_request_acs - ask for ACS to be enabled if supported
2235 */
2236 void pci_request_acs(void)
2237 {
2238 pci_acs_enable = 1;
2239 }
2240
2241 /**
2242 * pci_enable_acs - enable ACS if hardware supports it
2243 * @dev: the PCI device
2244 */
2245 void pci_enable_acs(struct pci_dev *dev)
2246 {
2247 int pos;
2248 u16 cap;
2249 u16 ctrl;
2250
2251 if (!pci_acs_enable)
2252 return;
2253
2254 if (!pci_is_pcie(dev))
2255 return;
2256
2257 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2258 if (!pos)
2259 return;
2260
2261 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2262 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2263
2264 /* Source Validation */
2265 ctrl |= (cap & PCI_ACS_SV);
2266
2267 /* P2P Request Redirect */
2268 ctrl |= (cap & PCI_ACS_RR);
2269
2270 /* P2P Completion Redirect */
2271 ctrl |= (cap & PCI_ACS_CR);
2272
2273 /* Upstream Forwarding */
2274 ctrl |= (cap & PCI_ACS_UF);
2275
2276 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2277 }
2278
2279 /**
2280 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2281 * @dev: the PCI device
2282 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2283 *
2284 * Perform INTx swizzling for a device behind one level of bridge. This is
2285 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2286 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2287 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2288 * the PCI Express Base Specification, Revision 2.1)
2289 */
2290 u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2291 {
2292 int slot;
2293
2294 if (pci_ari_enabled(dev->bus))
2295 slot = 0;
2296 else
2297 slot = PCI_SLOT(dev->devfn);
2298
2299 return (((pin - 1) + slot) % 4) + 1;
2300 }
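/*
 * Worked example (editor's note): a device in slot 3 behind one bridge
 * asserting INTB (pin 2) appears at the bridge as
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA. Each bridge level applies
 * the same rotation, which pci_common_swizzle() below iterates up to
 * the root bus.
 */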
2301
2302 int
2303 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2304 {
2305 u8 pin;
2306
2307 pin = dev->pin;
2308 if (!pin)
2309 return -1;
2310
2311 while (!pci_is_root_bus(dev->bus)) {
2312 pin = pci_swizzle_interrupt_pin(dev, pin);
2313 dev = dev->bus->self;
2314 }
2315 *bridge = dev;
2316 return pin;
2317 }
2318
2319 /**
2320 * pci_common_swizzle - swizzle INTx all the way to root bridge
2321 * @dev: the PCI device
2322 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2323 *
2324 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2325 * bridges all the way up to a PCI root bus.
2326 */
2327 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2328 {
2329 u8 pin = *pinp;
2330
2331 while (!pci_is_root_bus(dev->bus)) {
2332 pin = pci_swizzle_interrupt_pin(dev, pin);
2333 dev = dev->bus->self;
2334 }
2335 *pinp = pin;
2336 return PCI_SLOT(dev->devfn);
2337 }
2338
2339 /**
2340 * pci_release_region - Release a PCI BAR
2341 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2342 * @bar: BAR to release
2343 *
2344 * Releases the PCI I/O and memory resources previously reserved by a
2345 * successful call to pci_request_region. Call this function only
2346 * after all use of the PCI regions has ceased.
2347 */
2348 void pci_release_region(struct pci_dev *pdev, int bar)
2349 {
2350 struct pci_devres *dr;
2351
2352 if (pci_resource_len(pdev, bar) == 0)
2353 return;
2354 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2355 release_region(pci_resource_start(pdev, bar),
2356 pci_resource_len(pdev, bar));
2357 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2358 release_mem_region(pci_resource_start(pdev, bar),
2359 pci_resource_len(pdev, bar));
2360
2361 dr = find_pci_dr(pdev);
2362 if (dr)
2363 dr->region_mask &= ~(1 << bar);
2364 }
2365
2366 /**
2367 * __pci_request_region - Reserve PCI I/O and memory resource
2368 * @pdev: PCI device whose resources are to be reserved
2369 * @bar: BAR to be reserved
2370 * @res_name: Name to be associated with resource.
2371 * @exclusive: whether the region access is exclusive or not
2372 *
2373 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2374 * being reserved by owner @res_name. Do not access any
2375 * address inside the PCI regions unless this call returns
2376 * successfully.
2377 *
2378 * If @exclusive is set, then the region is marked so that userspace
2379 * is explicitly not allowed to map the resource via /dev/mem or
2380 * sysfs MMIO access.
2381 *
2382 * Returns 0 on success, or %EBUSY on error. A warning
2383 * message is also printed on failure.
2384 */
2385 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2386 int exclusive)
2387 {
2388 struct pci_devres *dr;
2389
2390 if (pci_resource_len(pdev, bar) == 0)
2391 return 0;
2392
2393 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2394 if (!request_region(pci_resource_start(pdev, bar),
2395 pci_resource_len(pdev, bar), res_name))
2396 goto err_out;
2397 }
2398 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2399 if (!__request_mem_region(pci_resource_start(pdev, bar),
2400 pci_resource_len(pdev, bar), res_name,
2401 exclusive))
2402 goto err_out;
2403 }
2404
2405 dr = find_pci_dr(pdev);
2406 if (dr)
2407 dr->region_mask |= 1 << bar;
2408
2409 return 0;
2410
2411 err_out:
2412 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2413 &pdev->resource[bar]);
2414 return -EBUSY;
2415 }
2416
2417 /**
2418 * pci_request_region - Reserve PCI I/O and memory resource
2419 * @pdev: PCI device whose resources are to be reserved
2420 * @bar: BAR to be reserved
2421 * @res_name: Name to be associated with resource
2422 *
2423 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2424 * being reserved by owner @res_name. Do not access any
2425 * address inside the PCI regions unless this call returns
2426 * successfully.
2427 *
2428 * Returns 0 on success, or %EBUSY on error. A warning
2429 * message is also printed on failure.
2430 */
2431 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2432 {
2433 return __pci_request_region(pdev, bar, res_name, 0);
2434 }
2435
2436 /**
2437 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2438 * @pdev: PCI device whose resources are to be reserved
2439 * @bar: BAR to be reserved
2440 * @res_name: Name to be associated with resource.
2441 *
2442 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2443 * being reserved by owner @res_name. Do not access any
2444 * address inside the PCI regions unless this call returns
2445 * successfully.
2446 *
2447 * Returns 0 on success, or %EBUSY on error. A warning
2448 * message is also printed on failure.
2449 *
2450 * The key difference that _exclusive makes is that userspace is
2451 * explicitly not allowed to map the resource via /dev/mem or
2452 * sysfs.
2453 */
2454 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2455 {
2456 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2457 }

2458 /**
2459 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2460 * @pdev: PCI device whose resources were previously reserved
2461 * @bars: Bitmask of BARs to be released
2462 *
2463 * Release selected PCI I/O and memory resources previously reserved.
2464 * Call this function only after all use of the PCI regions has ceased.
2465 */
2466 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2467 {
2468 int i;
2469
2470 for (i = 0; i < 6; i++)
2471 if (bars & (1 << i))
2472 pci_release_region(pdev, i);
2473 }
2474
2475 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2476 const char *res_name, int excl)
2477 {
2478 int i;
2479
2480 for (i = 0; i < 6; i++)
2481 if (bars & (1 << i))
2482 if (__pci_request_region(pdev, i, res_name, excl))
2483 goto err_out;
2484 return 0;
2485
2486 err_out:
2487 while (--i >= 0)
2488 if (bars & (1 << i))
2489 pci_release_region(pdev, i);
2490
2491 return -EBUSY;
2492 }
2493
2494
2495 /**
2496 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2497 * @pdev: PCI device whose resources are to be reserved
2498 * @bars: Bitmask of BARs to be requested
2499 * @res_name: Name to be associated with resource
2500 */
2501 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2502 const char *res_name)
2503 {
2504 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2505 }
2506
2507 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2508 int bars, const char *res_name)
2509 {
2510 return __pci_request_selected_regions(pdev, bars, res_name,
2511 IORESOURCE_EXCLUSIVE);
2512 }
2513
2514 /**
2515 * pci_release_regions - Release reserved PCI I/O and memory resources
2516 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2517 *
2518 * Releases all PCI I/O and memory resources previously reserved by a
2519 * successful call to pci_request_regions. Call this function only
2520 * after all use of the PCI regions has ceased.
2521 */
2523 void pci_release_regions(struct pci_dev *pdev)
2524 {
2525 pci_release_selected_regions(pdev, (1 << 6) - 1);
2526 }
2527
2528 /**
2529 * pci_request_regions - Reserve PCI I/O and memory resources
2530 * @pdev: PCI device whose resources are to be reserved
2531 * @res_name: Name to be associated with resource.
2532 *
2533 * Mark all PCI regions associated with PCI device @pdev as
2534 * being reserved by owner @res_name. Do not access any
2535 * address inside the PCI regions unless this call returns
2536 * successfully.
2537 *
2538 * Returns 0 on success, or %EBUSY on error. A warning
2539 * message is also printed on failure.
2540 */
2541 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2542 {
2543 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2544 }
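/*
 * Usage sketch (editor's illustration): the canonical probe-time
 * sequence for these helpers. Everything other than the pci_* calls
 * ("foo_regions_probe", the "foo" owner string, BAR 0 being MMIO) is
 * an assumption made up for the example.
 */
static int __maybe_unused foo_regions_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto out_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto out_release;
	}
	/* ... device setup using 'regs' ... */
	return 0;

out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
	return err;
}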
2545
2546 /**
2547 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2548 * @pdev: PCI device whose resources are to be reserved
2549 * @res_name: Name to be associated with resource.
2550 *
2551 * Mark all PCI regions associated with PCI device @pdev as
2552 * being reserved by owner @res_name. Do not access any
2553 * address inside the PCI regions unless this call returns
2554 * successfully.
2555 *
2556 * pci_request_regions_exclusive() will mark the region so that
2557 * /dev/mem and the sysfs MMIO access will not be allowed.
2558 *
2559 * Returns 0 on success, or %EBUSY on error. A warning
2560 * message is also printed on failure.
2561 */
2562 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2563 {
2564 return pci_request_selected_regions_exclusive(pdev,
2565 ((1 << 6) - 1), res_name);
2566 }
2567
2568 static void __pci_set_master(struct pci_dev *dev, bool enable)
2569 {
2570 u16 old_cmd, cmd;
2571
2572 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2573 if (enable)
2574 cmd = old_cmd | PCI_COMMAND_MASTER;
2575 else
2576 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2577 if (cmd != old_cmd) {
2578 dev_dbg(&dev->dev, "%s bus mastering\n",
2579 enable ? "enabling" : "disabling");
2580 pci_write_config_word(dev, PCI_COMMAND, cmd);
2581 }
2582 dev->is_busmaster = enable;
2583 }
2584
2585 /**
2586 * pci_set_master - enables bus-mastering for device dev
2587 * @dev: the PCI device to enable
2588 *
2589 * Enables bus-mastering on the device and calls pcibios_set_master()
2590 * to do the needed arch specific settings.
2591 */
2592 void pci_set_master(struct pci_dev *dev)
2593 {
2594 __pci_set_master(dev, true);
2595 pcibios_set_master(dev);
2596 }
2597
2598 /**
2599 * pci_clear_master - disables bus-mastering for device dev
2600 * @dev: the PCI device to disable
2601 */
2602 void pci_clear_master(struct pci_dev *dev)
2603 {
2604 __pci_set_master(dev, false);
2605 }
2606
2607 /**
2608 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2609 * @dev: the PCI device for which MWI is to be enabled
2610 *
2611 * Helper function for pci_set_mwi.
2612 * Originally copied from drivers/net/acenic.c.
2613 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2614 *
2615 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2616 */
2617 int pci_set_cacheline_size(struct pci_dev *dev)
2618 {
2619 u8 cacheline_size;
2620
2621 if (!pci_cache_line_size)
2622 return -EINVAL;
2623
2624 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2625 equal to or a multiple of the right value. */
2626 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2627 if (cacheline_size >= pci_cache_line_size &&
2628 (cacheline_size % pci_cache_line_size) == 0)
2629 return 0;
2630
2631 /* Write the correct value. */
2632 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2633 /* Read it back. */
2634 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2635 if (cacheline_size == pci_cache_line_size)
2636 return 0;
2637
2638 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2639 "supported\n", pci_cache_line_size << 2);
2640
2641 return -EINVAL;
2642 }
2643 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2644
2645 #ifdef PCI_DISABLE_MWI
2646 int pci_set_mwi(struct pci_dev *dev)
2647 {
2648 return 0;
2649 }
2650
2651 int pci_try_set_mwi(struct pci_dev *dev)
2652 {
2653 return 0;
2654 }
2655
2656 void pci_clear_mwi(struct pci_dev *dev)
2657 {
2658 }
2659
2660 #else
2661
2662 /**
2663 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2664 * @dev: the PCI device for which MWI is enabled
2665 *
2666 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2667 *
2668 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2669 */
2670 int
2671 pci_set_mwi(struct pci_dev *dev)
2672 {
2673 int rc;
2674 u16 cmd;
2675
2676 rc = pci_set_cacheline_size(dev);
2677 if (rc)
2678 return rc;
2679
2680 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2681 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2682 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2683 cmd |= PCI_COMMAND_INVALIDATE;
2684 pci_write_config_word(dev, PCI_COMMAND, cmd);
2685 }
2686
2687 return 0;
2688 }
2689
2690 /**
2691 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2692 * @dev: the PCI device for which MWI is enabled
2693 *
2694 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2695 * Callers are not required to check the return value.
2696 *
2697 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2698 */
2699 int pci_try_set_mwi(struct pci_dev *dev)
2700 {
2701 return pci_set_mwi(dev);
2703 }
2704
2705 /**
2706 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2707 * @dev: the PCI device to disable
2708 *
2709 * Disables PCI Memory-Write-Invalidate transaction on the device
2710 */
2711 void
2712 pci_clear_mwi(struct pci_dev *dev)
2713 {
2714 u16 cmd;
2715
2716 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2717 if (cmd & PCI_COMMAND_INVALIDATE) {
2718 cmd &= ~PCI_COMMAND_INVALIDATE;
2719 pci_write_config_word(dev, PCI_COMMAND, cmd);
2720 }
2721 }
2722 #endif /* ! PCI_DISABLE_MWI */
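/*
 * Usage sketch (editor's illustration, "foo_enable_mwi" is invented):
 * MWI is an optimization, so drivers typically use the _try_ variant
 * and ignore failure; it is also harmless when PCI_DISABLE_MWI turns
 * these helpers into stubs.
 */
static void __maybe_unused foo_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort, no need to check */
}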
2723
2724 /**
2725 * pci_intx - enables/disables PCI INTx for device dev
2726 * @pdev: the PCI device to operate on
2727 * @enable: boolean: whether to enable or disable PCI INTx
2728 *
2729 * Enables/disables PCI INTx for device dev
2730 */
2731 void
2732 pci_intx(struct pci_dev *pdev, int enable)
2733 {
2734 u16 pci_command, new;
2735
2736 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2737
2738 if (enable) {
2739 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2740 } else {
2741 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2742 }
2743
2744 if (new != pci_command) {
2745 struct pci_devres *dr;
2746
2747 pci_write_config_word(pdev, PCI_COMMAND, new);
2748
2749 dr = find_pci_dr(pdev);
2750 if (dr && !dr->restore_intx) {
2751 dr->restore_intx = 1;
2752 dr->orig_intx = !enable;
2753 }
2754 }
2755 }
2756
2757 /**
2758 * pci_msi_off - disables any MSI or MSI-X capabilities
2759 * @dev: the PCI device to operate on
2760 *
2761 * If you want to use MSI, see pci_enable_msi() and friends.
2762 * This is a lower-level primitive that allows us to disable
2763 * MSI operation at the device level.
2764 */
2765 void pci_msi_off(struct pci_dev *dev)
2766 {
2767 int pos;
2768 u16 control;
2769
2770 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2771 if (pos) {
2772 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2773 control &= ~PCI_MSI_FLAGS_ENABLE;
2774 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2775 }
2776 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2777 if (pos) {
2778 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2779 control &= ~PCI_MSIX_FLAGS_ENABLE;
2780 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2781 }
2782 }
2783 EXPORT_SYMBOL_GPL(pci_msi_off);
2784
2785 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2786 {
2787 return dma_set_max_seg_size(&dev->dev, size);
2788 }
2789 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2790
2791 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2792 {
2793 return dma_set_seg_boundary(&dev->dev, mask);
2794 }
2795 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2796
2797 static int pcie_flr(struct pci_dev *dev, int probe)
2798 {
2799 int i;
2800 int pos;
2801 u32 cap;
2802 u16 status, control;
2803
2804 pos = pci_pcie_cap(dev);
2805 if (!pos)
2806 return -ENOTTY;
2807
2808 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2809 if (!(cap & PCI_EXP_DEVCAP_FLR))
2810 return -ENOTTY;
2811
2812 if (probe)
2813 return 0;
2814
2815 /* Wait for the Transaction Pending bit to clear */
2816 for (i = 0; i < 4; i++) {
2817 if (i)
2818 msleep((1 << (i - 1)) * 100);
2819
2820 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2821 if (!(status & PCI_EXP_DEVSTA_TRPND))
2822 goto clear;
2823 }
2824
2825 dev_err(&dev->dev, "transaction is not cleared; "
2826 "proceeding with reset anyway\n");
2827
2828 clear:
2829 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2830 control |= PCI_EXP_DEVCTL_BCR_FLR;
2831 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2832
2833 msleep(100);
2834
2835 return 0;
2836 }
2837
2838 static int pci_af_flr(struct pci_dev *dev, int probe)
2839 {
2840 int i;
2841 int pos;
2842 u8 cap;
2843 u8 status;
2844
2845 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2846 if (!pos)
2847 return -ENOTTY;
2848
2849 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2850 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2851 return -ENOTTY;
2852
2853 if (probe)
2854 return 0;
2855
2856 /* Wait for the Transaction Pending bit to clear */
2857 for (i = 0; i < 4; i++) {
2858 if (i)
2859 msleep((1 << (i - 1)) * 100);
2860
2861 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2862 if (!(status & PCI_AF_STATUS_TP))
2863 goto clear;
2864 }
2865
2866 dev_err(&dev->dev, "transaction is not cleared; "
2867 "proceeding with reset anyway\n");
2868
2869 clear:
2870 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2871 msleep(100);
2872
2873 return 0;
2874 }
2875
2876 /**
2877 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
2878 * @dev: Device to reset.
2879 * @probe: If set, only check if the device can be reset this way.
2880 *
2881 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
2882 * unset, it will be reinitialized internally when going from PCI_D3hot to
2883 * PCI_D0. If that's the case and the device is not in a low-power state
2884 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
2885 *
2886 * NOTE: This causes the caller to sleep for twice the device power transition
2887 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
2888 * by default (i.e. unless the @dev's d3_delay field has a different value).
2889 * Moreover, only devices in D0 can be reset by this function.
2890 */
2891 static int pci_pm_reset(struct pci_dev *dev, int probe)
2892 {
2893 u16 csr;
2894
2895 if (!dev->pm_cap)
2896 return -ENOTTY;
2897
2898 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2899 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2900 return -ENOTTY;
2901
2902 if (probe)
2903 return 0;
2904
2905 if (dev->current_state != PCI_D0)
2906 return -EINVAL;
2907
2908 csr &= ~PCI_PM_CTRL_STATE_MASK;
2909 csr |= PCI_D3hot;
2910 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2911 pci_dev_d3_sleep(dev);
2912
2913 csr &= ~PCI_PM_CTRL_STATE_MASK;
2914 csr |= PCI_D0;
2915 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2916 pci_dev_d3_sleep(dev);
2917
2918 return 0;
2919 }
2920
2921 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2922 {
2923 u16 ctrl;
2924 struct pci_dev *pdev;
2925
2926 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2927 return -ENOTTY;
2928
2929 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2930 if (pdev != dev)
2931 return -ENOTTY;
2932
2933 if (probe)
2934 return 0;
2935
2936 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2937 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2938 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2939 msleep(100);
2940
2941 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2942 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2943 msleep(100);
2944
2945 return 0;
2946 }
2947
2948 static int pci_dev_reset(struct pci_dev *dev, int probe)
2949 {
2950 int rc;
2951
2952 might_sleep();
2953
2954 if (!probe) {
2955 pci_block_user_cfg_access(dev);
2956 /* block PM suspend, driver probe, etc. */
2957 device_lock(&dev->dev);
2958 }
2959
2960 rc = pci_dev_specific_reset(dev, probe);
2961 if (rc != -ENOTTY)
2962 goto done;
2963
2964 rc = pcie_flr(dev, probe);
2965 if (rc != -ENOTTY)
2966 goto done;
2967
2968 rc = pci_af_flr(dev, probe);
2969 if (rc != -ENOTTY)
2970 goto done;
2971
2972 rc = pci_pm_reset(dev, probe);
2973 if (rc != -ENOTTY)
2974 goto done;
2975
2976 rc = pci_parent_bus_reset(dev, probe);
2977 done:
2978 if (!probe) {
2979 device_unlock(&dev->dev);
2980 pci_unblock_user_cfg_access(dev);
2981 }
2982
2983 return rc;
2984 }
2985
2986 /**
2987 * __pci_reset_function - reset a PCI device function
2988 * @dev: PCI device to reset
2989 *
2990 * Some devices allow an individual function to be reset without affecting
2991 * other functions in the same device. The PCI device must be responsive
2992 * to PCI config space in order to use this function.
2993 *
2994 * The device function is presumed to be unused when this function is called.
2995 * Resetting the device will make the contents of PCI configuration space
2996 * random, so any caller of this must be prepared to reinitialise the
2997 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
2998 * etc.
2999 *
3000 * Returns 0 if the device function was successfully reset or negative if the
3001 * device doesn't support resetting a single function.
3002 */
3003 int __pci_reset_function(struct pci_dev *dev)
3004 {
3005 return pci_dev_reset(dev, 0);
3006 }
3007 EXPORT_SYMBOL_GPL(__pci_reset_function);
3008
3009 /**
3010 * pci_probe_reset_function - check whether the device can be safely reset
3011 * @dev: PCI device to reset
3012 *
3013 * Some devices allow an individual function to be reset without affecting
3014 * other functions in the same device. The PCI device must be responsive
3015 * to PCI config space in order to use this function.
3016 *
3017 * Returns 0 if the device function can be reset or negative if the
3018 * device doesn't support resetting a single function.
3019 */
3020 int pci_probe_reset_function(struct pci_dev *dev)
3021 {
3022 return pci_dev_reset(dev, 1);
3023 }
3024
3025 /**
3026 * pci_reset_function - quiesce and reset a PCI device function
3027 * @dev: PCI device to reset
3028 *
3029 * Some devices allow an individual function to be reset without affecting
3030 * other functions in the same device. The PCI device must be responsive
3031 * to PCI config space in order to use this function.
3032 *
3033 * This function does not just reset the PCI portion of a device, but
3034 * clears all the state associated with the device. This function differs
3035 * from __pci_reset_function in that it saves and restores device state
3036 * over the reset.
3037 *
3038 * Returns 0 if the device function was successfully reset or negative if the
3039 * device doesn't support resetting a single function.
3040 */
3041 int pci_reset_function(struct pci_dev *dev)
3042 {
3043 int rc;
3044
3045 rc = pci_dev_reset(dev, 1);
3046 if (rc)
3047 return rc;
3048
3049 pci_save_state(dev);
3050
3051 /*
3052 * both INTx and MSI are disabled after the Interrupt Disable bit
3053 * is set and the Bus Master bit is cleared.
3054 */
3055 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3056
3057 rc = pci_dev_reset(dev, 0);
3058
3059 pci_restore_state(dev);
3060
3061 return rc;
3062 }
3063 EXPORT_SYMBOL_GPL(pci_reset_function);
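/*
 * Usage sketch (editor's illustration, "foo_reset" is invented): a
 * driver that needs a known-clean function, e.g. before handing it to
 * a guest, can rely on pci_reset_function() saving and restoring
 * config space around the reset as described above.
 */
static int __maybe_unused foo_reset(struct pci_dev *pdev)
{
	int ret = pci_reset_function(pdev);

	if (ret)
		dev_err(&pdev->dev, "function reset failed: %d\n", ret);
	return ret;
}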
3064
3065 /**
3066 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3067 * @dev: PCI device to query
3068 *
3069 * Returns mmrbc: maximum designed memory read count in bytes
3070 * or appropriate error value.
3071 */
3072 int pcix_get_max_mmrbc(struct pci_dev *dev)
3073 {
3074 int cap;
3075 u32 stat;
3076
3077 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3078 if (!cap)
3079 return -EINVAL;
3080
3081 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3082 return -EINVAL;
3083
3084 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3085 }
3086 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3087
3088 /**
3089 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3090 * @dev: PCI device to query
3091 *
3092 * Returns mmrbc: maximum memory read count in bytes
3093 * or appropriate error value.
3094 */
3095 int pcix_get_mmrbc(struct pci_dev *dev)
3096 {
3097 int cap;
3098 u16 cmd;
3099
3100 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3101 if (!cap)
3102 return -EINVAL;
3103
3104 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3105 return -EINVAL;
3106
3107 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3108 }
3109 EXPORT_SYMBOL(pcix_get_mmrbc);
3110
3111 /**
3112 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3113 * @dev: PCI device to query
3114 * @mmrbc: maximum memory read count in bytes
3115 * valid values are 512, 1024, 2048, 4096
3116 *
3117 * If possible, sets the maximum memory read byte count; some bridges
3118 * have errata that prevent this.
3119 */
3120 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3121 {
3122 int cap;
3123 u32 stat, v, o;
3124 u16 cmd;
3125
3126 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3127 return -EINVAL;
3128
3129 v = ffs(mmrbc) - 10;
3130
3131 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3132 if (!cap)
3133 return -EINVAL;
3134
3135 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3136 return -EINVAL;
3137
3138 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3139 return -E2BIG;
3140
3141 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3142 return -EINVAL;
3143
3144 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3145 if (o != v) {
3146 if (v > o && dev->bus &&
3147 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3148 return -EIO;
3149
3150 cmd &= ~PCI_X_CMD_MAX_READ;
3151 cmd |= v << 2;
3152 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3153 return -EIO;
3154 }
3155 return 0;
3156 }
3157 EXPORT_SYMBOL(pcix_set_mmrbc);
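/*
 * Worked example (editor's note): for mmrbc = 2048, ffs(2048) = 12 so
 * v = 2, and pcix_get_mmrbc() inverts that as 512 << 2 = 2048. A
 * hypothetical driver ("foo_cap_mmrbc" is invented) capping a device
 * known to misbehave at large reads:
 */
static void __maybe_unused foo_cap_mmrbc(struct pci_dev *pdev)
{
	if (pcix_get_mmrbc(pdev) > 1024)
		pcix_set_mmrbc(pdev, 1024);
}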
3158
3159 /**
3160 * pcie_get_readrq - get PCI Express read request size
3161 * @dev: PCI device to query
3162 *
3163 * Returns maximum memory read request in bytes
3164 * or appropriate error value.
3165 */
3166 int pcie_get_readrq(struct pci_dev *dev)
3167 {
3168 int ret, cap;
3169 u16 ctl;
3170
3171 cap = pci_pcie_cap(dev);
3172 if (!cap)
3173 return -EINVAL;
3174
3175 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3176 if (!ret)
3177 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3178
3179 return ret;
3180 }
3181 EXPORT_SYMBOL(pcie_get_readrq);
3182
3183 /**
3184 * pcie_set_readrq - set PCI Express maximum memory read request
3185 * @dev: PCI device to query
3186 * @rq: maximum memory read count in bytes
3187 * valid values are 128, 256, 512, 1024, 2048, 4096
3188 *
3189 * If possible sets maximum read byte count
3190 */
3191 int pcie_set_readrq(struct pci_dev *dev, int rq)
3192 {
3193 int cap, err = -EINVAL;
3194 u16 ctl, v;
3195
3196 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3197 goto out;
3198
3199 v = (ffs(rq) - 8) << 12;
3200
3201 cap = pci_pcie_cap(dev);
3202 if (!cap)
3203 goto out;
3204
3205 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3206 if (err)
3207 goto out;
3208
3209 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3210 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3211 ctl |= v;
3212 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3213 }
3214
3215 out:
3216 return err;
3217 }
3218 EXPORT_SYMBOL(pcie_set_readrq);
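/*
 * Worked example (editor's note): the read request size field encodes
 * 128 << n, so rq = 512 yields v = (ffs(512) - 8) << 12 = 2 << 12.
 * Some drivers lower the read request size for fairness on shared
 * links; a sketch ("foo_tune_readrq" is invented):
 */
static void __maybe_unused foo_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}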
3219
3220 /**
3221 * pci_select_bars - Make BAR mask from the type of resource
3222 * @dev: the PCI device for which BAR mask is made
3223 * @flags: resource type mask to be selected
3224 *
3225 * This helper routine makes a BAR mask from the given resource type.
3226 */
3227 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3228 {
3229 int i, bars = 0;
3230 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3231 if (pci_resource_flags(dev, i) & flags)
3232 bars |= (1 << i);
3233 return bars;
3234 }
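/*
 * Usage sketch (editor's illustration, "foo_claim_mem_bars" and the
 * "foo" owner string are invented): pair pci_select_bars() with the
 * selected-region helpers to claim only a device's memory BARs.
 */
static int __maybe_unused foo_claim_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}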
3235
3236 /**
3237 * pci_resource_bar - get position of the BAR associated with a resource
3238 * @dev: the PCI device
3239 * @resno: the resource number
3240 * @type: the BAR type to be filled in
3241 *
3242 * Returns BAR position in config space, or 0 if the BAR is invalid.
3243 */
3244 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3245 {
3246 int reg;
3247
3248 if (resno < PCI_ROM_RESOURCE) {
3249 *type = pci_bar_unknown;
3250 return PCI_BASE_ADDRESS_0 + 4 * resno;
3251 } else if (resno == PCI_ROM_RESOURCE) {
3252 *type = pci_bar_mem32;
3253 return dev->rom_base_reg;
3254 } else if (resno < PCI_BRIDGE_RESOURCES) {
3255 /* device specific resource */
3256 reg = pci_iov_resource_bar(dev, resno, type);
3257 if (reg)
3258 return reg;
3259 }
3260
3261 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3262 return 0;
3263 }
3264
3265 /* Some architectures require additional programming to enable VGA */
3266 static arch_set_vga_state_t arch_set_vga_state;
3267
3268 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3269 {
3270 arch_set_vga_state = func; /* NULL disables */
3271 }
3272
3273 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3274 unsigned int command_bits, u32 flags)
3275 {
3276 if (arch_set_vga_state)
3277 return arch_set_vga_state(dev, decode, command_bits,
3278 flags);
3279 return 0;
3280 }
3281
3282 /**
3283 * pci_set_vga_state - set VGA decode state on device and parents if requested
3284 * @dev: the PCI device
3285 * @decode: true = enable decoding, false = disable decoding
3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3287 * @flags: %PCI_VGA_STATE_CHANGE_DECODES and/or %PCI_VGA_STATE_CHANGE_BRIDGE
3288 * (traverse ancestors and change bridges)
3289 */
3290 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3291 unsigned int command_bits, u32 flags)
3292 {
3293 struct pci_bus *bus;
3294 struct pci_dev *bridge;
3295 u16 cmd;
3296 int rc;
3297
3298 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3299
3300 /* ARCH specific VGA enables */
3301 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3302 if (rc)
3303 return rc;
3304
3305 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3306 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3307 if (decode)
3308 cmd |= command_bits;
3309 else
3310 cmd &= ~command_bits;
3311 pci_write_config_word(dev, PCI_COMMAND, cmd);
3312 }
3313
3314 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3315 return 0;
3316
3317 bus = dev->bus;
3318 while (bus) {
3319 bridge = bus->self;
3320 if (bridge) {
3321 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3322 &cmd);
3323 if (decode)
3324 cmd |= PCI_BRIDGE_CTL_VGA;
3325 else
3326 cmd &= ~PCI_BRIDGE_CTL_VGA;
3327 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3328 cmd);
3329 }
3330 bus = bus->parent;
3331 }
3332 return 0;
3333 }
3334
3335 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3336 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3337 static DEFINE_SPINLOCK(resource_alignment_lock);
3338
3339 /**
3340 * pci_specified_resource_alignment - get resource alignment specified by user.
3341 * @dev: the PCI device to query
3342 *
3343 * RETURNS: Resource alignment if it is specified.
3344 * Zero if it is not specified.
3345 */
3346 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3347 {
3348 int seg, bus, slot, func, align_order, count;
3349 resource_size_t align = 0;
3350 char *p;
3351
3352 spin_lock(&resource_alignment_lock);
3353 p = resource_alignment_param;
3354 while (*p) {
3355 count = 0;
3356 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3357 p[count] == '@') {
3358 p += count + 1;
3359 } else {
3360 align_order = -1;
3361 }
3362 if (sscanf(p, "%x:%x:%x.%x%n",
3363 &seg, &bus, &slot, &func, &count) != 4) {
3364 seg = 0;
3365 if (sscanf(p, "%x:%x.%x%n",
3366 &bus, &slot, &func, &count) != 3) {
3367 /* Invalid format */
3368 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3369 p);
3370 break;
3371 }
3372 }
3373 p += count;
3374 if (seg == pci_domain_nr(dev->bus) &&
3375 bus == dev->bus->number &&
3376 slot == PCI_SLOT(dev->devfn) &&
3377 func == PCI_FUNC(dev->devfn)) {
3378 if (align_order == -1) {
3379 align = PAGE_SIZE;
3380 } else {
3381 align = 1 << align_order;
3382 }
3383 /* Found */
3384 break;
3385 }
3386 if (*p != ';' && *p != ',') {
3387 /* End of param or invalid format */
3388 break;
3389 }
3390 p++;
3391 }
3392 spin_unlock(&resource_alignment_lock);
3393 return align;
3394 }
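/*
 * Format example (editor's note): the parser above accepts
 * "[<order>@][<domain>:]<bus>:<slot>.<func>[;...]", so booting with
 * "pci=resource_alignment=12@00:1f.0" requests 1 << 12 = 4096-byte
 * alignment for device 00:1f.0; omitting the order falls back to
 * PAGE_SIZE.
 */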
3395
3396 /**
3397 * pci_is_reassigndev - check if specified PCI device is a reassignment target
3398 * @dev: the PCI device to check
3399 *
3400 * RETURNS: non-zero if the PCI device is a target device to reassign,
3401 * zero otherwise.
3402 */
3403 int pci_is_reassigndev(struct pci_dev *dev)
3404 {
3405 return (pci_specified_resource_alignment(dev) != 0);
3406 }
3407
3408 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3409 {
3410 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3411 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3412 spin_lock(&resource_alignment_lock);
3413 strncpy(resource_alignment_param, buf, count);
3414 resource_alignment_param[count] = '\0';
3415 spin_unlock(&resource_alignment_lock);
3416 return count;
3417 }
3418
3419 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3420 {
3421 size_t count;
3422 spin_lock(&resource_alignment_lock);
3423 count = snprintf(buf, size, "%s", resource_alignment_param);
3424 spin_unlock(&resource_alignment_lock);
3425 return count;
3426 }
3427
3428 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3429 {
3430 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3431 }
3432
3433 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3434 const char *buf, size_t count)
3435 {
3436 return pci_set_resource_alignment_param(buf, count);
3437 }
3438
3439 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3440 pci_resource_alignment_store);
3441
3442 static int __init pci_resource_alignment_sysfs_init(void)
3443 {
3444 return bus_create_file(&pci_bus_type,
3445 &bus_attr_resource_alignment);
3446 }
3447
3448 late_initcall(pci_resource_alignment_sysfs_init);
3449
3450 static void __devinit pci_no_domains(void)
3451 {
3452 #ifdef CONFIG_PCI_DOMAINS
3453 pci_domains_supported = 0;
3454 #endif
3455 }
3456
3457 /**
3458 * pci_ext_cfg_avail - can we access extended PCI config space?
3459 * @dev: The PCI device of the root bridge.
3460 *
3461 * Returns 1 if we can access PCI extended config space (offsets
3462 * greater than 0xff). This is the default implementation. Architecture
3463 * implementations can override this.
3464 */
3465 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3466 {
3467 return 1;
3468 }
3469
3470 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3471 {
3472 }
3473 EXPORT_SYMBOL(pci_fixup_cardbus);
3474
3475 static int __init pci_setup(char *str)
3476 {
3477 while (str) {
3478 char *k = strchr(str, ',');
3479 if (k)
3480 *k++ = 0;
3481 if (*str && (str = pcibios_setup(str)) && *str) {
3482 if (!strcmp(str, "nomsi")) {
3483 pci_no_msi();
3484 } else if (!strcmp(str, "noaer")) {
3485 pci_no_aer();
3486 } else if (!strncmp(str, "realloc", 7)) {
3487 pci_realloc();
3488 } else if (!strcmp(str, "nodomains")) {
3489 pci_no_domains();
3490 } else if (!strncmp(str, "cbiosize=", 9)) {
3491 pci_cardbus_io_size = memparse(str + 9, &str);
3492 } else if (!strncmp(str, "cbmemsize=", 10)) {
3493 pci_cardbus_mem_size = memparse(str + 10, &str);
3494 } else if (!strncmp(str, "resource_alignment=", 19)) {
3495 pci_set_resource_alignment_param(str + 19,
3496 strlen(str + 19));
3497 } else if (!strncmp(str, "ecrc=", 5)) {
3498 pcie_ecrc_get_policy(str + 5);
3499 } else if (!strncmp(str, "hpiosize=", 9)) {
3500 pci_hotplug_io_size = memparse(str + 9, &str);
3501 } else if (!strncmp(str, "hpmemsize=", 10)) {
3502 pci_hotplug_mem_size = memparse(str + 10, &str);
3503 } else {
3504 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3505 str);
3506 }
3507 }
3508 str = k;
3509 }
3510 return 0;
3511 }
3512 early_param("pci", pci_setup);
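/*
 * Command-line examples (editor's note) for the options parsed above:
 *
 *	pci=nomsi,nodomains
 *	pci=cbmemsize=32M,cbiosize=4K
 *	pci=hpmemsize=8M,hpiosize=4K
 *	pci=resource_alignment=12@00:1f.0
 *
 * Entries are comma-separated and offered to pcibios_setup() first, so
 * an architecture may consume options before this generic code sees
 * them.
 */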
3513
3514 EXPORT_SYMBOL(pci_reenable_device);
3515 EXPORT_SYMBOL(pci_enable_device_io);
3516 EXPORT_SYMBOL(pci_enable_device_mem);
3517 EXPORT_SYMBOL(pci_enable_device);
3518 EXPORT_SYMBOL(pcim_enable_device);
3519 EXPORT_SYMBOL(pcim_pin_device);
3520 EXPORT_SYMBOL(pci_disable_device);
3521 EXPORT_SYMBOL(pci_find_capability);
3522 EXPORT_SYMBOL(pci_bus_find_capability);
3523 EXPORT_SYMBOL(pci_release_regions);
3524 EXPORT_SYMBOL(pci_request_regions);
3525 EXPORT_SYMBOL(pci_request_regions_exclusive);
3526 EXPORT_SYMBOL(pci_release_region);
3527 EXPORT_SYMBOL(pci_request_region);
3528 EXPORT_SYMBOL(pci_request_region_exclusive);
3529 EXPORT_SYMBOL(pci_release_selected_regions);
3530 EXPORT_SYMBOL(pci_request_selected_regions);
3531 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3532 EXPORT_SYMBOL(pci_set_master);
3533 EXPORT_SYMBOL(pci_clear_master);
3534 EXPORT_SYMBOL(pci_set_mwi);
3535 EXPORT_SYMBOL(pci_try_set_mwi);
3536 EXPORT_SYMBOL(pci_clear_mwi);
3537 EXPORT_SYMBOL_GPL(pci_intx);
3538 EXPORT_SYMBOL(pci_assign_resource);
3539 EXPORT_SYMBOL(pci_find_parent_resource);
3540 EXPORT_SYMBOL(pci_select_bars);
3541
3542 EXPORT_SYMBOL(pci_set_power_state);
3543 EXPORT_SYMBOL(pci_save_state);
3544 EXPORT_SYMBOL(pci_restore_state);
3545 EXPORT_SYMBOL(pci_pme_capable);
3546 EXPORT_SYMBOL(pci_pme_active);
3547 EXPORT_SYMBOL(pci_wake_from_d3);
3548 EXPORT_SYMBOL(pci_target_state);
3549 EXPORT_SYMBOL(pci_prepare_to_sleep);
3550 EXPORT_SYMBOL(pci_back_from_sleep);
3551 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);