]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * drivers/pci/setup-bus.c | |
3 | * | |
4 | * Extruded from code written by | |
5 | * Dave Rusling (david.rusling@reo.mts.dec.com) | |
6 | * David Mosberger (davidm@cs.arizona.edu) | |
7 | * David Miller (davem@redhat.com) | |
8 | * | |
9 | * Support routines for initializing a PCI subsystem. | |
10 | */ | |
11 | ||
12 | /* | |
13 | * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> | |
14 | * PCI-PCI bridges cleanup, sorted resource allocation. | |
15 | * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru> | |
16 | * Converted to allocation in 3 passes, which gives | |
17 | * tighter packing. Prefetchable range support. | |
18 | */ | |
19 | ||
20 | #include <linux/init.h> | |
21 | #include <linux/kernel.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/pci.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/ioport.h> | |
26 | #include <linux/cache.h> | |
27 | #include <linux/slab.h> | |
28 | #include "pci.h" | |
29 | ||
/*
 * Singly-linked tracking node used while (re)assigning resources.
 * Stores a snapshot of the resource's start/end/flags so they can be
 * restored later, plus an optional extra size request.
 */
struct resource_list_x {
	struct resource_list_x *next;	/* next tracker in the list */
	struct resource *res;		/* resource being tracked */
	struct pci_dev *dev;		/* device the resource belongs to */
	resource_size_t start;		/* saved copy of res->start */
	resource_size_t end;		/* saved copy of res->end */
	resource_size_t add_size;	/* optional additional size wanted */
	resource_size_t min_align;	/* minimum alignment for add_size */
	unsigned long flags;		/* saved copy of res->flags */
};
40 | ||
/*
 * Free every node after @head in a singly-linked list of struct @type
 * (works for both resource_list and resource_list_x, which share the
 * leading 'next' member) and leave @head pointing at an empty list.
 */
#define free_list(type, head) do {                      \
	struct type *list, *tmp;			\
	for (list = (head)->next; list;) {		\
		tmp = list;				\
		list = list->next;			\
		kfree(tmp);				\
	}						\
	(head)->next = NULL;				\
} while (0)
50 | ||
/*
 * Global switch for the optional-resource reallocation logic.
 * Off by default; turned on (one-way) via pci_realloc().
 */
int pci_realloc_enable = 0;
#define pci_realloc_enabled() pci_realloc_enable
void pci_realloc(void)
{
	pci_realloc_enable = 1;
}
57 | ||
/**
 * add_to_list() - add a new resource tracker to the list
 * @head: Head of the list
 * @dev: device corresponding to which the resource
 * belongs
 * @res: The resource to be tracked
 * @add_size: additional size to be optionally added
 * to the resource
 * @min_align: minimum alignment to apply when the additional
 * size is assigned
 *
 * The current start/end/flags of @res are snapshotted into the new
 * node so they can be restored later.
 *
 * Returns 0 on success, -ENOMEM if the tracker node could not be
 * allocated.
 */
static int add_to_list(struct resource_list_x *head,
		 struct pci_dev *dev, struct resource *res,
		 resource_size_t add_size, resource_size_t min_align)
{
	struct resource_list_x *list = head;
	struct resource_list_x *ln = list->next;
	struct resource_list_x *tmp;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp) {
		pr_warning("add_to_list: kmalloc() failed!\n");
		return -ENOMEM;
	}

	/* Insert the new node right after the head (LIFO order). */
	tmp->next = ln;
	tmp->res = res;
	tmp->dev = dev;
	tmp->start = res->start;
	tmp->end = res->end;
	tmp->flags = res->flags;
	tmp->add_size = add_size;
	tmp->min_align = min_align;
	list->next = tmp;

	return 0;
}
93 | ||
/*
 * Record @res on the failure list @head.  The saved size/alignment
 * fields are irrelevant for failure bookkeeping, so zeroes are passed.
 */
static void add_to_failed_list(struct resource_list_x *head,
				struct pci_dev *dev, struct resource *res)
{
	/* add_size and min_align: dont care for failed entries */
	add_to_list(head, dev, res, 0, 0);
}
101 | ||
102 | static void remove_from_list(struct resource_list_x *realloc_head, | |
103 | struct resource *res) | |
104 | { | |
105 | struct resource_list_x *prev, *tmp, *list; | |
106 | ||
107 | prev = realloc_head; | |
108 | for (list = realloc_head->next; list;) { | |
109 | if (list->res != res) { | |
110 | prev = list; | |
111 | list = list->next; | |
112 | continue; | |
113 | } | |
114 | tmp = list; | |
115 | prev->next = list = list->next; | |
116 | kfree(tmp); | |
117 | } | |
118 | } | |
119 | ||
120 | static resource_size_t get_res_add_size(struct resource_list_x *realloc_head, | |
121 | struct resource *res) | |
122 | { | |
123 | struct resource_list_x *list; | |
124 | ||
125 | /* check if it is in realloc_head list */ | |
126 | for (list = realloc_head->next; list && list->res != res; | |
127 | list = list->next) | |
128 | ; | |
129 | ||
130 | if (list) { | |
131 | dev_printk(KERN_DEBUG, &list->dev->dev, | |
132 | "%pR get_res_add_size add_size %llx\n", | |
133 | list->res, (unsigned long long)list->add_size); | |
134 | return list->add_size; | |
135 | } | |
136 | ||
137 | return 0; | |
138 | } | |
139 | ||
140 | static void __dev_sort_resources(struct pci_dev *dev, | |
141 | struct resource_list *head) | |
142 | { | |
143 | u16 class = dev->class >> 8; | |
144 | ||
145 | /* Don't touch classless devices or host bridges or ioapics. */ | |
146 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) | |
147 | return; | |
148 | ||
149 | /* Don't touch ioapic devices already enabled by firmware */ | |
150 | if (class == PCI_CLASS_SYSTEM_PIC) { | |
151 | u16 command; | |
152 | pci_read_config_word(dev, PCI_COMMAND, &command); | |
153 | if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) | |
154 | return; | |
155 | } | |
156 | ||
157 | pdev_sort_resources(dev, head); | |
158 | } | |
159 | ||
160 | static inline void reset_resource(struct resource *res) | |
161 | { | |
162 | res->start = 0; | |
163 | res->end = 0; | |
164 | res->flags = 0; | |
165 | } | |
166 | ||
/**
 * reassign_resources_sorted() - satisfy any additional resource requests
 *
 * @realloc_head : head of the list tracking requests requiring additional
 *             resources
 * @head     : head of the list tracking requests with allocated
 *             resources
 *
 * Walk through each element of the realloc_head and try to procure
 * additional resources for the element, provided the element
 * is in the head list.
 *
 * Every node of @realloc_head is consumed (unlinked and freed) by
 * this function, whether or not the extra space could be obtained.
 */
static void reassign_resources_sorted(struct resource_list_x *realloc_head,
		struct resource_list *head)
{
	struct resource *res;
	struct resource_list_x *list, *tmp, *prev;
	struct resource_list *hlist;
	resource_size_t add_size;
	int idx;

	prev = realloc_head;
	for (list = realloc_head->next; list;) {
		res = list->res;
		/* skip resource that has been reset */
		if (!res->flags)
			goto out;

		/* skip this resource if not found in head list */
		for (hlist = head->next; hlist && hlist->res != res;
				hlist = hlist->next);
		if (!hlist) { /* just skip */
			prev = list;
			list = list->next;
			continue;
		}

		/* BAR index is the offset into the device's resource table */
		idx = res - &list->dev->resource[0];
		add_size=list->add_size;
		if (!resource_size(res)) {
			/*
			 * Empty resource: place it at the saved start and
			 * give it add_size bytes, then try to assign it.
			 */
			res->start = list->start;
			res->end = res->start + add_size - 1;
			if(pci_assign_resource(list->dev, idx))
				reset_resource(res);
		} else {
			/* Already sized: try to grow it by add_size. */
			resource_size_t align = list->min_align;
			res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
			if (pci_reassign_resource(list->dev, idx, add_size, align))
				dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
					   res);
		}
out:
		/* Drop the processed tracker node from realloc_head. */
		tmp = list;
		prev->next = list = list->next;
		kfree(tmp);
	}
}
224 | ||
/**
 * assign_requested_resources_sorted() - satisfy resource requests
 *
 * @head : head of the list tracking requests for resources
 * @fail_head : head of the list tracking requests that could
 *		not be allocated
 *
 * Satisfy resource requests of each element in the list. Add
 * requests that could not satisfied to the fail_head list.
 * A resource that fails assignment is reset (start/end/flags zeroed).
 */
static void assign_requested_resources_sorted(struct resource_list *head,
				 struct resource_list_x *fail_head)
{
	struct resource *res;
	struct resource_list *list;
	int idx;

	for (list = head->next; list; list = list->next) {
		res = list->res;
		/* BAR index is the offset into the device's resource table */
		idx = res - &list->dev->resource[0];
		if (resource_size(res) && pci_assign_resource(list->dev, idx)) {
			if (fail_head && !pci_is_root_bus(list->dev->bus)) {
				/*
				 * if the failed res is for ROM BAR, and it will
				 * be enabled later, don't add it to the list
				 */
				if (!((idx == PCI_ROM_RESOURCE) &&
				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
					add_to_failed_list(fail_head, list->dev, res);
			}
			reset_resource(res);
		}
	}
}
259 | ||
/*
 * Core assignment routine: first try to satisfy every request with its
 * optional add_size folded in; if that fails, roll everything back and
 * fall back to required-size assignment followed by per-resource
 * reassignment of the optional space.  @head is always emptied.
 */
static void __assign_resources_sorted(struct resource_list *head,
				 struct resource_list_x *realloc_head,
				 struct resource_list_x *fail_head)
{
	/*
	 * Should not assign requested resources at first.
	 * they could be adjacent, so later reassign can not reallocate
	 * them one by one in parent resource window.
	 * Try to assign requested + add_size at begining
	 * if could do that, could get out early.
	 * if could not do that, we still try to assign requested at first,
	 * then try to reassign add_size for some resources.
	 */
	struct resource_list_x save_head, local_fail_head, *list;
	struct resource_list *l;

	/* Check if optional add_size is there */
	if (!realloc_head || !realloc_head->next)
		goto requested_and_reassign;

	/* Save original start, end, flags etc at first */
	save_head.next = NULL;
	for (l = head->next; l; l = l->next)
		if (add_to_list(&save_head, l->dev, l->res, 0, 0)) {
			/* snapshot failed (OOM): fall back to plain path */
			free_list(resource_list_x, &save_head);
			goto requested_and_reassign;
		}

	/* Update res in head list with add_size in realloc_head list */
	for (l = head->next; l; l = l->next)
		l->res->end += get_res_add_size(realloc_head, l->res);

	/* Try updated head list with add_size added */
	local_fail_head.next = NULL;
	assign_requested_resources_sorted(head, &local_fail_head);

	/* all assigned with add_size ? */
	if (!local_fail_head.next) {
		/* Remove head list from realloc_head list */
		for (l = head->next; l; l = l->next)
			remove_from_list(realloc_head, l->res);
		free_list(resource_list_x, &save_head);
		free_list(resource_list, head);
		return;
	}

	free_list(resource_list_x, &local_fail_head);
	/* Release assigned resource */
	for (l = head->next; l; l = l->next)
		if (l->res->parent)
			release_resource(l->res);
	/* Restore start/end/flags from saved list */
	for (list = save_head.next; list; list = list->next) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
	}
	free_list(resource_list_x, &save_head);

requested_and_reassign:
	/* Satisfy the must-have resource requests */
	assign_requested_resources_sorted(head, fail_head);

	/* Try to satisfy any additional optional resource
		requests */
	if (realloc_head)
		reassign_resources_sorted(realloc_head, head);
	free_list(resource_list, head);
}
331 | ||
332 | static void pdev_assign_resources_sorted(struct pci_dev *dev, | |
333 | struct resource_list_x *add_head, | |
334 | struct resource_list_x *fail_head) | |
335 | { | |
336 | struct resource_list head; | |
337 | ||
338 | head.next = NULL; | |
339 | __dev_sort_resources(dev, &head); | |
340 | __assign_resources_sorted(&head, add_head, fail_head); | |
341 | ||
342 | } | |
343 | ||
344 | static void pbus_assign_resources_sorted(const struct pci_bus *bus, | |
345 | struct resource_list_x *realloc_head, | |
346 | struct resource_list_x *fail_head) | |
347 | { | |
348 | struct pci_dev *dev; | |
349 | struct resource_list head; | |
350 | ||
351 | head.next = NULL; | |
352 | list_for_each_entry(dev, &bus->devices, bus_list) | |
353 | __dev_sort_resources(dev, &head); | |
354 | ||
355 | __assign_resources_sorted(&head, realloc_head, fail_head); | |
356 | } | |
357 | ||
/*
 * Program a CardBus bridge's four windows (two I/O, two memory) from
 * the bus resources already assigned to @bus.  Windows whose resource
 * lacks the matching flag are left unprogrammed.
 */
void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
		 bus->secondary, bus->subordinate);

	/* First I/O window */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need. This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, " bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	/* Second I/O window */
	res = bus->resource[1];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, " bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	/* First memory window */
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, " bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	/* Second memory window */
	res = bus->resource[3];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, " bridge window %pR\n", res);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);
412 | ||
/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there is no I/O ports or memory behind the
   bridge, corresponding range must be turned off by writing base
   value greater than limit to the bridge's base/limit registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO. */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, io_upper16;

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	res = bus->resource[0];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, " bridge window %pR\n", res);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;	/* base > limit: I/O window disabled */
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
454 | ||
455 | static void pci_setup_bridge_mmio(struct pci_bus *bus) | |
456 | { | |
457 | struct pci_dev *bridge = bus->self; | |
458 | struct resource *res; | |
459 | struct pci_bus_region region; | |
460 | u32 l; | |
461 | ||
462 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | |
463 | res = bus->resource[1]; | |
464 | pcibios_resource_to_bus(bridge, ®ion, res); | |
465 | if (res->flags & IORESOURCE_MEM) { | |
466 | l = (region.start >> 16) & 0xfff0; | |
467 | l |= region.end & 0xfff00000; | |
468 | dev_info(&bridge->dev, " bridge window %pR\n", res); | |
469 | } else { | |
470 | l = 0x0000fff0; | |
471 | } | |
472 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | |
473 | } | |
474 | ||
/*
 * Program the bridge's prefetchable memory window from bus resource 2,
 * including the upper-32-bit registers for 64-bit capable windows.
 */
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *res;
	struct pci_bus_region region;
	u32 l, bu, lu;

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	bu = lu = 0;
	res = bus->resource[2];
	pcibios_resource_to_bus(bridge, &region, res);
	if (res->flags & IORESOURCE_PREFETCH) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (res->flags & IORESOURCE_MEM_64) {
			/* 64-bit window: upper halves of base and limit */
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
		}
		dev_info(&bridge->dev, " bridge window %pR\n", res);
	} else {
		/* base > limit disables the prefetchable window */
		l = 0x0000fff0;
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	/* Set the upper 32 bits of PREF base & limit. */
	pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
508 | ||
509 | static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | |
510 | { | |
511 | struct pci_dev *bridge = bus->self; | |
512 | ||
513 | dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", | |
514 | bus->secondary, bus->subordinate); | |
515 | ||
516 | if (type & IORESOURCE_IO) | |
517 | pci_setup_bridge_io(bus); | |
518 | ||
519 | if (type & IORESOURCE_MEM) | |
520 | pci_setup_bridge_mmio(bus); | |
521 | ||
522 | if (type & IORESOURCE_PREFETCH) | |
523 | pci_setup_bridge_mmio_pref(bus); | |
524 | ||
525 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | |
526 | } | |
527 | ||
528 | void pci_setup_bridge(struct pci_bus *bus) | |
529 | { | |
530 | unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | | |
531 | IORESOURCE_PREFETCH; | |
532 | ||
533 | __pci_setup_bridge(bus, type); | |
534 | } | |
535 | ||
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

	/* Probe the I/O base/limit register: write a pattern and see
	   whether any bits stick (read-only zero means unsupported). */
	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/* DECchip 21050 pass 2 errata: the bridge may miss an address
	   disconnect boundary by one PCI data phase.
	   Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	/* Same write-probe trick for the prefetchable base/limit. */
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
					0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
		    PCI_PREF_RANGE_TYPE_64) {
			b_res[2].flags |= IORESOURCE_MEM_64;
			b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
		}
	}

	/* double check if bridge does support 64 bit pref */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					&mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		/* restore the original upper-32-bit base */
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
					mem_base_hi);
	}
}
592 | ||
593 | /* Helper function for sizing routines: find first available | |
594 | bus resource of a given type. Note: we intentionally skip | |
595 | the bus resources which have already been assigned (that is, | |
596 | have non-NULL parent resource). */ | |
597 | static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type) | |
598 | { | |
599 | int i; | |
600 | struct resource *r; | |
601 | unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | | |
602 | IORESOURCE_PREFETCH; | |
603 | ||
604 | pci_bus_for_each_resource(bus, r, i) { | |
605 | if (r == &ioport_resource || r == &iomem_resource) | |
606 | continue; | |
607 | if (r && (r->flags & type_mask) == type && !r->parent) | |
608 | return r; | |
609 | } | |
610 | return NULL; | |
611 | } | |
612 | ||
613 | static resource_size_t calculate_iosize(resource_size_t size, | |
614 | resource_size_t min_size, | |
615 | resource_size_t size1, | |
616 | resource_size_t old_size, | |
617 | resource_size_t align) | |
618 | { | |
619 | if (size < min_size) | |
620 | size = min_size; | |
621 | if (old_size == 1 ) | |
622 | old_size = 0; | |
623 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | |
624 | flag in the struct pci_bus. */ | |
625 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | |
626 | size = (size & 0xff) + ((size & ~0xffUL) << 2); | |
627 | #endif | |
628 | size = ALIGN(size + size1, align); | |
629 | if (size < old_size) | |
630 | size = old_size; | |
631 | return size; | |
632 | } | |
633 | ||
634 | static resource_size_t calculate_memsize(resource_size_t size, | |
635 | resource_size_t min_size, | |
636 | resource_size_t size1, | |
637 | resource_size_t old_size, | |
638 | resource_size_t align) | |
639 | { | |
640 | if (size < min_size) | |
641 | size = min_size; | |
642 | if (old_size == 1 ) | |
643 | old_size = 0; | |
644 | if (size < old_size) | |
645 | size = old_size; | |
646 | size = ALIGN(size + size1, align); | |
647 | return size; | |
648 | } | |
649 | ||
/**
 * pbus_size_io() - size the io window of a given bus
 *
 * @bus : the bus
 * @min_size : the minimum io window that must to be allocated
 * @add_size : additional optional io window
 * @realloc_head : track the additional io window on this list
 *
 * Sizing the IO windows of the PCI-PCI bridge is trivial,
 * since these windows have 4K granularity and the IO ranges
 * of non-bridge PCI devices are limited to 256 bytes.
 * We must be careful with the ISA aliasing though.
 */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
		resource_size_t add_size, struct resource_list_x *realloc_head)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size0 = 0, size1 = 0;
	resource_size_t children_add_size = 0;

	/* No free I/O window on this bus: nothing to size. */
	if (!b_res)
		return;

	/* Sum the children's I/O needs, ISA-aliased ranges separately. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;

			if (realloc_head)
				children_add_size += get_res_add_size(realloc_head, r);
		}
	}
	/* size0: required window; size1: window with optional space. */
	size0 = calculate_iosize(size, min_size, size1,
			resource_size(b_res), 4096);
	if (children_add_size > add_size)
		add_size = children_add_size;
	size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
		calculate_iosize(size, min_size, add_size + size1,
			resource_size(b_res), 4096);
	if (!size0 && !size1) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size0 - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	if (size1 > size0 && realloc_head)
		add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
}
717 | ||
/**
 * pbus_size_mem() - size the memory window of a given bus
 *
 * @bus : the bus
 * @mask : mask of resource flags to match against @type
 * @type : type of memory window wanted (e.g. prefetchable)
 * @min_size : the minimum memory window that must to be allocated
 * @add_size : additional optional memory window
 * @realloc_head : track the additional memory window on this list
 *
 * Calculate the size of the bus and minimal alignment which
 * guarantees that all child resources fit in this size.
 *
 * Returns 0 when no matching free bus resource exists, 1 otherwise.
 */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
			 unsigned long type, resource_size_t min_size,
			 resource_size_t add_size,
			 struct resource_list_x *realloc_head)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size, size0, size1;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;
	resource_size_t children_add_size = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	/* Only keep 64-bit capability if every child below is 64-bit. */
	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
			/* put SRIOV requested res to the optional list */
			if (realloc_head && i >= PCI_IOV_RESOURCES &&
					i <= PCI_IOV_RESOURCE_END) {
				r->end = r->start - 1;
				add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
				children_add_size += r_size;
				continue;
			}
#endif
			/* For bridges size != alignment */
			align = pci_resource_alignment(dev, r);
			/* order: log2 of the alignment, relative to 1MB */
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "disabling BAR %d: %pR "
					 "(bad alignment %#llx)\n", i, r,
					 (unsigned long long) align);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;

			if (realloc_head)
				children_add_size += get_res_add_size(realloc_head, r);
		}
	}
	/* Derive the minimum window alignment from the per-order sums. */
	align = 0;
	min_align = 0;
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	/* size0: required window; size1: window with optional space. */
	size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
	if (children_add_size > add_size)
		add_size = children_add_size;
	size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
		calculate_memsize(size, min_size, add_size,
				resource_size(b_res), min_align);
	if (!size0 && !size1) {
		if (b_res->start || b_res->end)
			dev_info(&bus->self->dev, "disabling bridge window "
				 "%pR to [bus %02x-%02x] (unused)\n", b_res,
				 bus->secondary, bus->subordinate);
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size0 + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
	if (size1 > size0 && realloc_head)
		add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
	return 1;
}
831 | ||
832 | unsigned long pci_cardbus_resource_alignment(struct resource *res) | |
833 | { | |
834 | if (res->flags & IORESOURCE_IO) | |
835 | return pci_cardbus_io_size; | |
836 | if (res->flags & IORESOURCE_MEM) | |
837 | return pci_cardbus_mem_size; | |
838 | return 0; | |
839 | } | |
840 | ||
/*
 * Size the windows of a CardBus bridge.  Each window's space is
 * requested only as an optional allocation (via @realloc_head); the
 * resources themselves are deliberately left with size zero.
 */
static void pci_bus_size_cardbus(struct pci_bus *bus,
			struct resource_list_x *realloc_head)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus. We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
	if (realloc_head)
		add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);

	/* second I/O window */
	b_res[1].start = 0;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
	if (realloc_head)
		add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		/* try to enable it and re-read to see if it sticks */
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions. Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);

		b_res[3].start = 0;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
	} else {
		b_res[3].start = 0;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
	}

	/* set the size of the resource to zero, so that the resource does not
	 * get assigned during required-resource allocation cycle but gets assigned
	 * during the optional-resource allocation cycle.
	 */
	b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
	b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
}
902 | ||
/*
 * __pci_bus_size_bridges - size all bridge windows below (and at) @bus
 * @bus: top of the subtree to size
 * @realloc_head: list tracking optional/additional resources; may be NULL
 *
 * Recurses depth-first so children are sized before their parent bridge,
 * then sizes this bus's own bridge windows (I/O, prefetchable memory,
 * non-prefetchable memory).  Hotplug bridges get extra headroom.
 */
void __ref __pci_bus_size_bridges(struct pci_bus *bus,
			struct resource_list_x *realloc_head)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t additional_mem_size = 0, additional_io_size = 0;

	/* Size children first (depth-first). */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b, realloc_head);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			__pci_bus_size_bridges(b, realloc_head);
			break;
		}
	}

	/* The root bus?  It has no upstream bridge windows to size. */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			/* Reserve extra room for future hotplugged devices. */
			additional_io_size  = pci_hotplug_io_size;
			additional_mem_size = pci_hotplug_mem_size;
		}
		/*
		 * Follow thru
		 */
	default:
		pbus_size_io(bus, 0, additional_io_size, realloc_head);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			/* Non-pref window must carry both memory types,
			 * so double the hotplug headroom. */
			additional_mem_size += additional_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
		break;
	}
}
962 | ||
/**
 * pci_bus_size_bridges - size bridge windows for the subtree rooted at @bus
 * @bus: top of the subtree to size
 *
 * Public entry point: runs the sizing pass with no realloc tracking list,
 * so no optional/additional resources are recorded.
 */
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);
968 | ||
/*
 * __pci_bus_assign_resources - assign resources to devices on/below @bus
 * @bus: bus whose devices get their resources assigned
 * @realloc_head: optional/additional resource list; may be NULL
 * @fail_head: list collecting resources that failed to assign; may be NULL
 *
 * Assigns this bus's device resources (sorted by alignment), then
 * recurses into subordinate buses and programs each bridge's windows
 * on the way back up (post-order), so child allocations are final
 * before the bridge registers are written.
 */
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
					 struct resource_list_x *realloc_head,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus, realloc_head, fail_head);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		__pci_bus_assign_resources(b, realloc_head, fail_head);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			/* Don't reprogram windows of already-enabled bridges. */
			if (!pci_is_enabled(dev))
				pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}
1002 | ||
/**
 * pci_bus_assign_resources - assign resources for the subtree rooted at @bus
 * @bus: top of the subtree
 *
 * Public entry point: no realloc tracking and no failure collection.
 */
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
1008 | ||
/*
 * __pci_bridge_assign_resources - assign resources for one bridge's subtree
 * @bridge: the bridge device whose own resources and subtree are assigned
 * @add_head: optional/additional resource list; may be NULL
 * @fail_head: list collecting failed assignments; may be NULL
 *
 * Like __pci_bus_assign_resources() but rooted at a single bridge:
 * assigns the bridge's own windows first, then the subtree, then
 * programs the bridge registers.
 */
static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct resource_list_x *add_head,
					 struct resource_list_x *fail_head)
{
	struct pci_bus *b;

	/* Cast drops const for the sorted-assignment helper's signature. */
	pdev_assign_resources_sorted((struct pci_dev *)bridge,
					 add_head, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, add_head, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}
/*
 * pci_bridge_release_resources - release a bridge's windows of a given type
 * @bus: bus whose upstream bridge windows are released
 * @type: resource type mask (IO / MEM / MEM|PREFETCH) to release
 *
 * Releases every claimed bridge window matching @type (including any
 * child resources under it), remembers the old size so a later sizing
 * pass can grow it, and reprograms the bridge if anything was released.
 */
static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		if ((r->flags & type_mask) != type)
			continue;
		/* Skip windows not claimed in the resource tree. */
		if (!r->parent)
			continue;
		/*
		 * if there are children under that, we should release them
		 * all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				   "resource %d %pR released\n", idx, r);
			/* keep the old size */
			/* start=0, end=size-1 preserves resource_size(). */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoiding touch the one without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}
1080 | ||
/* How aggressively to release bridge windows during a retry pass. */
enum release_type {
	leaf_only,	/* release windows of leaf bridges only */
	whole_subtree,	/* release windows of every bridge in the subtree */
};
/*
 * Try to release PCI bridge resources from leaf bridges (or, for
 * whole_subtree, from every bridge below @bus), so a bigger window
 * can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
						   unsigned long type,
						   enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		/* Any subordinate bus means @bus is not a leaf. */
		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	/* Root buses have no upstream bridge to release. */
	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}
1120 | ||
1121 | static void pci_bus_dump_res(struct pci_bus *bus) | |
1122 | { | |
1123 | struct resource *res; | |
1124 | int i; | |
1125 | ||
1126 | pci_bus_for_each_resource(bus, res, i) { | |
1127 | if (!res || !res->end || !res->flags) | |
1128 | continue; | |
1129 | ||
1130 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); | |
1131 | } | |
1132 | } | |
1133 | ||
1134 | static void pci_bus_dump_resources(struct pci_bus *bus) | |
1135 | { | |
1136 | struct pci_bus *b; | |
1137 | struct pci_dev *dev; | |
1138 | ||
1139 | ||
1140 | pci_bus_dump_res(bus); | |
1141 | ||
1142 | list_for_each_entry(dev, &bus->devices, bus_list) { | |
1143 | b = dev->subordinate; | |
1144 | if (!b) | |
1145 | continue; | |
1146 | ||
1147 | pci_bus_dump_resources(b); | |
1148 | } | |
1149 | } | |
1150 | ||
1151 | static int __init pci_bus_get_depth(struct pci_bus *bus) | |
1152 | { | |
1153 | int depth = 0; | |
1154 | struct pci_dev *dev; | |
1155 | ||
1156 | list_for_each_entry(dev, &bus->devices, bus_list) { | |
1157 | int ret; | |
1158 | struct pci_bus *b = dev->subordinate; | |
1159 | if (!b) | |
1160 | continue; | |
1161 | ||
1162 | ret = pci_bus_get_depth(b); | |
1163 | if (ret + 1 > depth) | |
1164 | depth = ret + 1; | |
1165 | } | |
1166 | ||
1167 | return depth; | |
1168 | } | |
1169 | static int __init pci_get_max_depth(void) | |
1170 | { | |
1171 | int depth = 0; | |
1172 | struct pci_bus *bus; | |
1173 | ||
1174 | list_for_each_entry(bus, &pci_root_buses, node) { | |
1175 | int ret; | |
1176 | ||
1177 | ret = pci_bus_get_depth(bus); | |
1178 | if (ret > depth) | |
1179 | depth = ret; | |
1180 | } | |
1181 | ||
1182 | return depth; | |
1183 | } | |
1184 | ||
1185 | ||
/*
 * First pass does not touch PCI bridge windows.
 * Second and later passes release undersized leaf bridge windows
 * (eventually whole subtrees), retrying until everything fits or
 * the maximum bus depth is reached.
 */
void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	struct resource_list_x realloc_list; /* list of resources that
					want additional resources */
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	struct resource_list_x head, *list;	/* head: failed assignments */
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;
	unsigned long failed_type;
	int max_depth = pci_get_max_depth();
	int pci_try_num;


	head.next = NULL;
	realloc_list.next = NULL;

	/* One initial try plus one retry per level of bus depth. */
	pci_try_num = max_depth + 1;
	printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
	       max_depth, pci_try_num);

again:
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_size_bridges(bus, &realloc_list);

	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_assign_resources(bus, &realloc_list, &head);
	/* The assignment pass must consume the whole realloc list. */
	BUG_ON(realloc_list.next);
	tried_times++;

	/* any device complain? */
	if (!head.next)
		goto enable_and_dump;

	/* don't realloc if asked to do so */
	if (!pci_realloc_enabled()) {
		free_list(resource_list_x, &head);
		goto enable_and_dump;
	}

	/* Collect the resource types that failed. */
	failed_type = 0;
	for (list = head.next; list;) {
		failed_type |= list->flags;
		list = list->next;
	}
	/*
	 * io port are tight, don't try extra
	 * or if reach the limit, don't want to try more
	 */
	failed_type &= type_mask;
	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
		free_list(resource_list_x, &head);
		goto enable_and_dump;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/* third times and later will not check if it is leaf */
	if ((tried_times + 1) > 2)
		rel_type = whole_subtree;

	/*
	 * Try to release leaf bridge's resources that doesn't fit resource of
	 * child device under that bridge
	 */
	for (list = head.next; list;) {
		bus = list->dev->bus;
		pci_bus_release_bridge_resources(bus, list->flags & type_mask,
						  rel_type);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		/* Bridge windows get re-sized from scratch next pass. */
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_list(resource_list_x, &head);

	goto again;

enable_and_dump:
	/* Depth last, update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_enable_bridges(bus);

	/* dump the resource on buses */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_bus_dump_resources(bus);
}
1293 | ||
/**
 * pci_assign_unassigned_bridge_resources - (re)assign resources below a bridge
 * @bridge: bridge (e.g. after hotplug) whose subtree needs resources
 *
 * Like pci_assign_unassigned_resources() but scoped to one bridge, with
 * at most one retry: on the retry, every bridge window in the subtree
 * that conflicts with a failed resource type is released first.
 */
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	struct resource_list_x add_list; /* list of resources that
					want additional resources */
	int tried_times = 0;
	struct resource_list_x head, *list;	/* head: failed assignments */
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	head.next = NULL;
	add_list.next = NULL;

again:
	__pci_bus_size_bridges(parent, &add_list);
	__pci_bridge_assign_resources(bridge, &add_list, &head);
	/* The assignment pass must consume the whole add list. */
	BUG_ON(add_list.next);
	tried_times++;

	if (!head.next)
		goto enable_all;

	if (tried_times >= 2) {
		/* still fail, don't need to try more */
		free_list(resource_list_x, &head);
		goto enable_all;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/*
	 * Try to release leaf bridge's resources that doesn't fit resource of
	 * child device under that bridge
	 */
	for (list = head.next; list;) {
		struct pci_bus *bus = list->dev->bus;
		unsigned long flags = list->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
		list = list->next;
	}
	/* restore size and flags */
	for (list = head.next; list;) {
		struct resource *res = list->res;

		res->start = list->start;
		res->end = list->end;
		res->flags = list->flags;
		/* Bridge windows get re-sized from scratch next pass. */
		if (list->dev->subordinate)
			res->flags = 0;

		list = list->next;
	}
	free_list(resource_list_x, &head);

	goto again;

enable_all:
	/* NOTE(review): retval from pci_reenable_device() is ignored —
	 * confirm whether a failure here should be reported. */
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);