2 * drivers/pci/setup-bus.c
4 * Extruded from code written by
5 * Dave Rusling (david.rusling@reo.mts.dec.com)
6 * David Mosberger (davidm@cs.arizona.edu)
7 * David Miller (davem@redhat.com)
9 * Support routines for initializing a PCI subsystem.
13 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
14 * PCI-PCI bridges cleanup, sorted resource allocation.
15 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
16 * Converted to allocation in 3 passes, which gives
17 * tighter packing. Prefetchable range support.
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/pci.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/cache.h>
27 #include <linux/slab.h>
/*
 * struct pci_dev_resource - tracker tying one device resource to the
 * extra (optional) size and minimum alignment requested for it.
 * NOTE(review): the embedded original line numbers skip values, so
 * several members (the dev/res pointers, end, flags) and the closing
 * brace are missing from this extract -- confirm against pristine source.
 */
30 struct pci_dev_resource {
31 struct list_head list;
34 resource_size_t start;
36 resource_size_t add_size;
37 resource_size_t min_align;
/*
 * free_list() - unlink and free every 'struct type' node on @head.
 * do/while(0) multi-statement macro; the kfree() call and loop close
 * fall outside this extract.
 */
41 #define free_list(type, head) do { \
42 struct type *dev_res, *tmp; \
43 list_for_each_entry_safe(dev_res, tmp, head, list) { \
44 list_del(&dev_res->list); \
/*
 * Boot-time knob: pci_realloc() enables the retry-with-reallocation
 * path checked via pci_realloc_enabled() during assignment.
 */
49 int pci_realloc_enable = 0;
50 #define pci_realloc_enabled() pci_realloc_enable
51 void pci_realloc(void)
53 pci_realloc_enable = 1;
57 * add_to_list() - add a new resource tracker to the list
58 * @head: Head of the list
59 * @dev: device corresponding to which the resource
61 * @res: The resource to be tracked
62 * @add_size: additional size to be optionally added
/*
 * Allocates a pci_dev_resource tracker, snapshots the resource's current
 * start/flags plus the requested add_size/min_align, and links it onto
 * @head.  The error-return path for allocation failure is not visible in
 * this extract.
 */
65 static int add_to_list(struct list_head *head,
66 struct pci_dev *dev, struct resource *res,
67 resource_size_t add_size, resource_size_t min_align)
69 struct pci_dev_resource *tmp;
71 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
73 pr_warning("add_to_list: kmalloc() failed!\n");
79 tmp->start = res->start;
81 tmp->flags = res->flags;
82 tmp->add_size = add_size;
83 tmp->min_align = min_align;
85 list_add(&tmp->list, head);
/* Convenience wrapper: record a failed resource with no extra size. */
90 static void add_to_failed_list(struct list_head *head,
91 struct pci_dev *dev, struct resource *res)
93 add_to_list(head, dev, res,
/* Drop the tracker (if any) whose ->res matches @res from @realloc_head. */
98 static void remove_from_list(struct list_head *realloc_head,
101 struct pci_dev_resource *dev_res_x, *tmp;
103 list_for_each_entry_safe(dev_res_x, tmp, realloc_head, list) {
104 if (dev_res_x->res == res) {
105 list_del(&dev_res_x->list);
/*
 * Look up @res on @realloc_head and return the extra size recorded for
 * it; presumably returns 0 when untracked (that path is not shown here).
 */
112 static resource_size_t get_res_add_size(struct list_head *realloc_head,
113 struct resource *res)
115 struct pci_dev_resource *dev_res_x;
117 list_for_each_entry(dev_res_x, realloc_head, list) {
118 if (dev_res_x->res == res) {
119 dev_printk(KERN_DEBUG, &dev_res_x->dev->dev,
120 "%pR get_res_add_size add_size %llx\n",
122 (unsigned long long)dev_res_x->add_size);
123 return dev_res_x->add_size;
130 /* Sort resources by alignment */
/*
 * Walk @dev's resources and insert each assignable one into @head,
 * keeping the list ordered by decreasing alignment (insertion sort).
 * Panics on allocation failure -- runs during early resource setup.
 */
131 static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
135 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
137 struct pci_dev_resource *dev_res, *tmp;
138 resource_size_t r_align;
141 r = &dev->resource[i];
/* Firmware-fixed BARs must not be moved. */
143 if (r->flags & IORESOURCE_PCI_FIXED)
/* Skip empty resources and those already claimed (non-NULL parent). */
146 if (!(r->flags) || r->parent)
149 r_align = pci_resource_alignment(dev, r);
151 dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
156 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
158 panic("pdev_sort_resources(): "
159 "kmalloc() failed!\n");
163 /* fallback is smallest one or list is empty*/
165 list_for_each_entry(dev_res, head, list) {
166 resource_size_t align;
168 align = pci_resource_alignment(dev_res->dev,
171 if (r_align > align) {
176 /* Insert it just before n*/
177 list_add_tail(&tmp->list, n);
/*
 * Filter out devices whose resources must never be touched, then defer
 * to pdev_sort_resources() for the rest.
 */
181 static void __dev_sort_resources(struct pci_dev *dev,
182 struct list_head *head)
184 u16 class = dev->class >> 8;
186 /* Don't touch classless devices or host bridges or ioapics. */
187 if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
190 /* Don't touch ioapic devices already enabled by firmware */
191 if (class == PCI_CLASS_SYSTEM_PIC) {
193 pci_read_config_word(dev, PCI_COMMAND, &command);
194 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
198 pdev_sort_resources(dev, head);
/* Reset @res to the unassigned state (body not present in this extract). */
201 static inline void reset_resource(struct resource *res)
209 * reassign_resources_sorted() - satisfy any additional resource requests
211 * @realloc_head : head of the list tracking requests requiring additional
213 * @head : head of the list tracking requests with allocated
216 * Walk through each element of the realloc_head and try to procure
217 * additional resources for the element, provided the element
218 * is in the head list.
220 static void reassign_resources_sorted(struct list_head *realloc_head,
221 struct list_head *head)
223 struct resource *res;
224 struct pci_dev_resource *dev_res_x, *tmp;
225 struct pci_dev_resource *dev_res;
226 resource_size_t add_size;
229 list_for_each_entry_safe(dev_res_x, tmp, realloc_head, list) {
230 bool found_match = false;
232 res = dev_res_x->res;
233 /* skip resource that has been reset */
237 /* skip this resource if not found in head list */
238 list_for_each_entry(dev_res, head, list) {
239 if (dev_res->res == res) {
244 if (!found_match)/* just skip */
/* BAR index is the offset of @res within the device's resource table. */
247 idx = res - &dev_res_x->dev->resource[0];
248 add_size = dev_res_x->add_size;
/* Empty resource: assign it fresh at the optional size... */
249 if (!resource_size(res)) {
250 res->start = dev_res_x->start;
251 res->end = res->start + add_size - 1;
252 if (pci_assign_resource(dev_res_x->dev, idx))
/* ...otherwise grow the existing assignment by add_size. */
255 resource_size_t align = dev_res_x->min_align;
256 res->flags |= dev_res_x->flags &
257 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
258 if (pci_reassign_resource(dev_res_x->dev, idx,
260 dev_printk(KERN_DEBUG, &dev_res_x->dev->dev,
261 "failed to add optional resources res=%pR\n",
/* Tracker is consumed either way; kfree presumably follows (not shown). */
265 list_del(&dev_res_x->list);
271 * assign_requested_resources_sorted() - satisfy resource requests
273 * @head : head of the list tracking requests for resources
274 * @failed_list : head of the list tracking requests that could
277 * Satisfy resource requests of each element in the list. Add
278 * requests that could not satisfied to the failed_list.
280 static void assign_requested_resources_sorted(struct list_head *head,
281 struct list_head *fail_head)
283 struct resource *res;
284 struct pci_dev_resource *dev_res;
287 list_for_each_entry(dev_res, head, list) {
289 idx = res - &dev_res->dev->resource[0];
290 if (resource_size(res) &&
291 pci_assign_resource(dev_res->dev, idx)) {
292 if (fail_head && !pci_is_root_bus(dev_res->dev->bus)) {
294 * if the failed res is for ROM BAR, and it will
295 * be enabled later, don't add it to the list
297 if (!((idx == PCI_ROM_RESOURCE) &&
298 (!(res->flags & IORESOURCE_ROM_ENABLE))))
299 add_to_failed_list(fail_head,
/*
 * Two-phase assignment: first try every request inflated by its optional
 * add_size; if anything fails, release, roll back to the saved state, and
 * fall back to must-have sizes followed by best-effort reassignment.
 */
307 static void __assign_resources_sorted(struct list_head *head,
308 struct list_head *realloc_head,
309 struct list_head *fail_head)
312 * Should not assign requested resources at first.
313 * they could be adjacent, so later reassign can not reallocate
314 * them one by one in parent resource window.
315 * Try to assign requested + add_size at begining
316 * if could do that, could get out early.
317 * if could not do that, we still try to assign requested at first,
318 * then try to reassign add_size for some resources.
320 LIST_HEAD(save_head);
321 LIST_HEAD(local_fail_head);
322 struct pci_dev_resource *dev_res_x;
323 struct pci_dev_resource *dev_res;
325 /* Check if optional add_size is there */
326 if (!realloc_head || list_empty(realloc_head))
327 goto requested_and_reassign;
329 /* Save original start, end, flags etc at first */
330 list_for_each_entry(dev_res, head, list) {
331 if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
332 free_list(pci_dev_resource, &save_head);
333 goto requested_and_reassign;
337 /* Update res in head list with add_size in realloc_head list */
338 list_for_each_entry(dev_res, head, list)
339 dev_res->res->end += get_res_add_size(realloc_head,
342 /* Try updated head list with add_size added */
343 assign_requested_resources_sorted(head, &local_fail_head);
345 /* all assigned with add_size ? */
346 if (list_empty(&local_fail_head)) {
347 /* Remove head list from realloc_head list */
348 list_for_each_entry(dev_res, head, list)
349 remove_from_list(realloc_head, dev_res->res);
350 free_list(pci_dev_resource, &save_head);
351 free_list(pci_dev_resource, head);
/* Inflated attempt failed: release everything and restore saved state. */
355 free_list(pci_dev_resource, &local_fail_head);
356 /* Release assigned resource */
357 list_for_each_entry(dev_res, head, list)
358 if (dev_res->res->parent)
359 release_resource(dev_res->res);
360 /* Restore start/end/flags from saved list */
361 list_for_each_entry(dev_res_x, &save_head, list) {
362 struct resource *res = dev_res_x->res;
364 res->start = dev_res_x->start;
365 res->end = dev_res_x->end;
366 res->flags = dev_res_x->flags;
368 free_list(pci_dev_resource, &save_head);
370 requested_and_reassign:
371 /* Satisfy the must-have resource requests */
372 assign_requested_resources_sorted(head, fail_head);
374 /* Try to satisfy any additional optional resource
377 reassign_resources_sorted(realloc_head, head);
378 free_list(pci_dev_resource, head);
/* Sort and assign the resources of a single device. */
381 static void pdev_assign_resources_sorted(struct pci_dev *dev,
382 struct list_head *add_head,
383 struct list_head *fail_head)
387 __dev_sort_resources(dev, &head);
388 __assign_resources_sorted(&head, add_head, fail_head);
/* Sort and assign the resources of every device on @bus. */
392 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
393 struct list_head *realloc_head,
394 struct list_head *fail_head)
399 list_for_each_entry(dev, &bus->devices, bus_list)
400 __dev_sort_resources(dev, &head);
402 __assign_resources_sorted(&head, realloc_head, fail_head);
/*
 * Program a CardBus bridge's four windows (two I/O, two memory) into its
 * config space from bus->resource[0..3].
 * NOTE(review): "®ion" below looks like a mangled "&region" from
 * extraction/encoding -- verify against the pristine source.
 */
405 void pci_setup_cardbus(struct pci_bus *bus)
407 struct pci_dev *bridge = bus->self;
408 struct resource *res;
409 struct pci_bus_region region;
411 dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
412 bus->secondary, bus->subordinate);
414 res = bus->resource[0];
415 pcibios_resource_to_bus(bridge, ®ion, res);
416 if (res->flags & IORESOURCE_IO) {
418 * The IO resource is allocated a range twice as large as it
419 * would normally need. This allows us to set both IO regs.
421 dev_info(&bridge->dev, " bridge window %pR\n", res);
422 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
424 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
428 res = bus->resource[1];
429 pcibios_resource_to_bus(bridge, ®ion, res);
430 if (res->flags & IORESOURCE_IO) {
431 dev_info(&bridge->dev, " bridge window %pR\n", res);
432 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
434 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
438 res = bus->resource[2];
439 pcibios_resource_to_bus(bridge, ®ion, res);
440 if (res->flags & IORESOURCE_MEM) {
441 dev_info(&bridge->dev, " bridge window %pR\n", res);
442 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
444 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
448 res = bus->resource[3];
449 pcibios_resource_to_bus(bridge, ®ion, res);
450 if (res->flags & IORESOURCE_MEM) {
451 dev_info(&bridge->dev, " bridge window %pR\n", res);
452 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
454 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
458 EXPORT_SYMBOL(pci_setup_cardbus);
460 /* Initialize bridges with base/limit values we have collected.
461 PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
462 requires that if there is no I/O ports or memory behind the
463 bridge, corresponding range must be turned off by writing base
464 value greater than limit to the bridge's base/limit registers.
466 Note: care must be taken when updating I/O base/limit registers
467 of bridges which support 32-bit I/O. This update requires two
468 config space writes, so it's quite possible that an I/O window of
469 the bridge will have some undesirable address (e.g. 0) after the
470 first write. Ditto 64-bit prefetchable MMIO. */
/* Program the bridge's I/O base/limit registers from bus->resource[0]. */
471 static void pci_setup_bridge_io(struct pci_bus *bus)
473 struct pci_dev *bridge = bus->self;
474 struct resource *res;
475 struct pci_bus_region region;
478 /* Set up the top and bottom of the PCI I/O segment for this bus. */
479 res = bus->resource[0];
480 pcibios_resource_to_bus(bridge, ®ion, res);
481 if (res->flags & IORESOURCE_IO) {
482 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
484 l |= (region.start >> 8) & 0x00f0;
485 l |= region.end & 0xf000;
486 /* Set up upper 16 bits of I/O base/limit. */
487 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
488 dev_info(&bridge->dev, " bridge window %pR\n", res);
490 /* Clear upper 16 bits of I/O base/limit. */
494 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
495 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
496 /* Update lower 16 bits of I/O base/limit. */
497 pci_write_config_dword(bridge, PCI_IO_BASE, l);
498 /* Update upper 16 bits of I/O base/limit. */
499 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
/* Program the bridge's non-prefetchable memory window from bus->resource[1]. */
502 static void pci_setup_bridge_mmio(struct pci_bus *bus)
504 struct pci_dev *bridge = bus->self;
505 struct resource *res;
506 struct pci_bus_region region;
509 /* Set up the top and bottom of the PCI Memory segment for this bus. */
510 res = bus->resource[1];
511 pcibios_resource_to_bus(bridge, ®ion, res);
512 if (res->flags & IORESOURCE_MEM) {
513 l = (region.start >> 16) & 0xfff0;
514 l |= region.end & 0xfff00000;
515 dev_info(&bridge->dev, " bridge window %pR\n", res);
519 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
/*
 * Program the bridge's prefetchable memory window from bus->resource[2],
 * including the upper-32-bit registers for 64-bit capable bridges.
 */
522 static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
524 struct pci_dev *bridge = bus->self;
525 struct resource *res;
526 struct pci_bus_region region;
529 /* Clear out the upper 32 bits of PREF limit.
530 If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
531 disables PREF range, which is ok. */
532 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
534 /* Set up PREF base/limit. */
536 res = bus->resource[2];
537 pcibios_resource_to_bus(bridge, ®ion, res);
538 if (res->flags & IORESOURCE_PREFETCH) {
539 l = (region.start >> 16) & 0xfff0;
540 l |= region.end & 0xfff00000;
541 if (res->flags & IORESOURCE_MEM_64) {
542 bu = upper_32_bits(region.start);
543 lu = upper_32_bits(region.end);
545 dev_info(&bridge->dev, " bridge window %pR\n", res);
549 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
551 /* Set the upper 32 bits of PREF base & limit. */
552 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
553 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
/* Program whichever window types are selected by @type, then bridge control. */
556 static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
558 struct pci_dev *bridge = bus->self;
560 dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
561 bus->secondary, bus->subordinate);
563 if (type & IORESOURCE_IO)
564 pci_setup_bridge_io(bus);
566 if (type & IORESOURCE_MEM)
567 pci_setup_bridge_mmio(bus);
569 if (type & IORESOURCE_PREFETCH)
570 pci_setup_bridge_mmio_pref(bus);
572 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
/* Public entry: program all three window types. */
575 void pci_setup_bridge(struct pci_bus *bus)
577 unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
580 __pci_setup_bridge(bus, type);
583 /* Check whether the bridge supports optional I/O and
584 prefetchable memory ranges. If not, the respective
585 base/limit registers must be read-only and read as 0. */
/*
 * Probe writability of the I/O and prefetch base/limit registers
 * (write a pattern, read it back, restore) and set the corresponding
 * IORESOURCE_* flags on the bridge's window resources b_res[0..2].
 */
586 static void pci_bridge_check_ranges(struct pci_bus *bus)
590 struct pci_dev *bridge = bus->self;
591 struct resource *b_res;
593 b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
594 b_res[1].flags |= IORESOURCE_MEM;
596 pci_read_config_word(bridge, PCI_IO_BASE, &io);
598 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
599 pci_read_config_word(bridge, PCI_IO_BASE, &io);
600 pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
603 b_res[0].flags |= IORESOURCE_IO;
604 /* DECchip 21050 pass 2 errata: the bridge may miss an address
605 disconnect boundary by one PCI data phase.
606 Workaround: do not use prefetching on this device. */
607 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
609 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
611 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
613 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
614 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
617 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
618 if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
619 PCI_PREF_RANGE_TYPE_64) {
620 b_res[2].flags |= IORESOURCE_MEM_64;
621 b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
625 /* double check if bridge does support 64 bit pref */
626 if (b_res[2].flags & IORESOURCE_MEM_64) {
627 u32 mem_base_hi, tmp;
628 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
630 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
632 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
634 b_res[2].flags &= ~IORESOURCE_MEM_64;
635 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
640 /* Helper function for sizing routines: find first available
641 bus resource of a given type. Note: we intentionally skip
642 the bus resources which have already been assigned (that is,
643 have non-NULL parent resource). */
644 static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
648 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
651 pci_bus_for_each_resource(bus, r, i) {
/* Never hand out the root ioport/iomem resources themselves. */
652 if (r == &ioport_resource || r == &iomem_resource)
654 if (r && (r->flags & type_mask) == type && !r->parent)
/*
 * Compute the I/O window size for a bridge, honouring @min_size and the
 * existing window size, aligned to @align.
 */
660 static resource_size_t calculate_iosize(resource_size_t size,
661 resource_size_t min_size,
662 resource_size_t size1,
663 resource_size_t old_size,
664 resource_size_t align)
670 /* To be fixed in 2.5: we should have sort of HAVE_ISA
671 flag in the struct pci_bus. */
672 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* Expand for ISA aliasing: only the low 256 bytes of each 1K are usable. */
673 size = (size & 0xff) + ((size & ~0xffUL) << 2);
675 size = ALIGN(size + size1, align);
/* Same idea for memory windows, without the ISA-alias expansion. */
681 static resource_size_t calculate_memsize(resource_size_t size,
682 resource_size_t min_size,
683 resource_size_t size1,
684 resource_size_t old_size,
685 resource_size_t align)
693 size = ALIGN(size + size1, align);
698 * pbus_size_io() - size the io window of a given bus
701 * @min_size : the minimum io window that must to be allocated
702 * @add_size : additional optional io window
703 * @realloc_head : track the additional io window on this list
705 * Sizing the IO windows of the PCI-PCI bridge is trivial,
706 * since these windows have 4K granularity and the IO ranges
707 * of non-bridge PCI devices are limited to 256 bytes.
708 * We must be careful with the ISA aliasing though.
710 static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
711 resource_size_t add_size, struct list_head *realloc_head)
714 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
715 unsigned long size = 0, size0 = 0, size1 = 0;
716 resource_size_t children_add_size = 0;
/* Sum the I/O requirements of every device on this bus. */
721 list_for_each_entry(dev, &bus->devices, bus_list) {
724 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
725 struct resource *r = &dev->resource[i];
726 unsigned long r_size;
728 if (r->parent || !(r->flags & IORESOURCE_IO))
730 r_size = resource_size(r);
733 /* Might be re-aligned for ISA */
739 children_add_size += get_res_add_size(realloc_head, r);
/* size0 = must-have window; size1 = window including optional add_size. */
742 size0 = calculate_iosize(size, min_size, size1,
743 resource_size(b_res), 4096);
744 if (children_add_size > add_size)
745 add_size = children_add_size;
746 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
747 calculate_iosize(size, min_size, add_size + size1,
748 resource_size(b_res), 4096);
749 if (!size0 && !size1) {
750 if (b_res->start || b_res->end)
751 dev_info(&bus->self->dev, "disabling bridge window "
752 "%pR to [bus %02x-%02x] (unused)\n", b_res,
753 bus->secondary, bus->subordinate);
757 /* Alignment of the IO window is always 4K */
759 b_res->end = b_res->start + size0 - 1;
760 b_res->flags |= IORESOURCE_STARTALIGN;
/* Record the optional extra (size1 - size0) for a later realloc pass. */
761 if (size1 > size0 && realloc_head)
762 add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
766 * pbus_size_mem() - size the memory window of a given bus
769 * @min_size : the minimum memory window that must to be allocated
770 * @add_size : additional optional memory window
771 * @realloc_head : track the additional memory window on this list
773 * Calculate the size of the bus and minimal alignment which
774 * guarantees that all child resources fit in this size.
776 static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
777 unsigned long type, resource_size_t min_size,
778 resource_size_t add_size,
779 struct list_head *realloc_head)
782 resource_size_t min_align, align, size, size0, size1;
783 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
784 int order, max_order;
785 struct resource *b_res = find_free_bus_resource(bus, type);
786 unsigned int mem64_mask = 0;
787 resource_size_t children_add_size = 0;
792 memset(aligns, 0, sizeof(aligns));
/* Window is 64-bit capable only if every child below is (AND-folded). */
796 mem64_mask = b_res->flags & IORESOURCE_MEM_64;
797 b_res->flags &= ~IORESOURCE_MEM_64;
799 list_for_each_entry(dev, &bus->devices, bus_list) {
802 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
803 struct resource *r = &dev->resource[i];
804 resource_size_t r_size;
806 if (r->parent || (r->flags & mask) != type)
808 r_size = resource_size(r);
809 #ifdef CONFIG_PCI_IOV
810 /* put SRIOV requested res to the optional list */
811 if (realloc_head && i >= PCI_IOV_RESOURCES &&
812 i <= PCI_IOV_RESOURCE_END) {
/* Shrink the IOV BAR to zero size; it is re-added as optional below. */
813 r->end = r->start - 1;
814 add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
815 children_add_size += r_size;
819 /* For bridges size != alignment */
820 align = pci_resource_alignment(dev, r);
/* Bucket by power-of-two order relative to 1MB (order 0 == 1MB). */
821 order = __ffs(align) - 20;
823 dev_warn(&dev->dev, "disabling BAR %d: %pR "
824 "(bad alignment %#llx)\n", i, r,
825 (unsigned long long) align);
832 /* Exclude ranges with size > align from
833 calculation of the alignment. */
835 aligns[order] += align;
836 if (order > max_order)
838 mem64_mask &= r->flags & IORESOURCE_MEM_64;
841 children_add_size += get_res_add_size(realloc_head, r);
/* Derive the minimal window alignment from the per-order totals. */
846 for (order = 0; order <= max_order; order++) {
847 resource_size_t align1 = 1;
849 align1 <<= (order + 20);
853 else if (ALIGN(align + min_align, min_align) < align1)
854 min_align = align1 >> 1;
855 align += aligns[order];
/* size0 = must-have window; size1 = window including optional add_size. */
857 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
858 if (children_add_size > add_size)
859 add_size = children_add_size;
860 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
861 calculate_memsize(size, min_size, add_size,
862 resource_size(b_res), min_align);
863 if (!size0 && !size1) {
864 if (b_res->start || b_res->end)
865 dev_info(&bus->self->dev, "disabling bridge window "
866 "%pR to [bus %02x-%02x] (unused)\n", b_res,
867 bus->secondary, bus->subordinate);
871 b_res->start = min_align;
872 b_res->end = size0 + min_align - 1;
873 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
874 if (size1 > size0 && realloc_head)
875 add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
/* CardBus windows are aligned to the fixed per-type reservation size. */
879 unsigned long pci_cardbus_resource_alignment(struct resource *res)
881 if (res->flags & IORESOURCE_IO)
882 return pci_cardbus_io_size;
883 if (res->flags & IORESOURCE_MEM)
884 return pci_cardbus_mem_size;
/*
 * Reserve the fixed CardBus window sizes as *optional* requests on
 * @realloc_head, and collapse the bridge windows to zero size so they
 * are skipped in the must-have pass and filled in the optional pass.
 */
888 static void pci_bus_size_cardbus(struct pci_bus *bus,
889 struct list_head *realloc_head)
891 struct pci_dev *bridge = bus->self;
892 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
896 * Reserve some resources for CardBus. We reserve
897 * a fixed amount of bus space for CardBus bridges.
900 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
902 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);
905 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
907 add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);
910 * Check whether prefetchable memory is supported
913 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
914 if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
915 ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
916 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
917 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
921 * If we have prefetchable memory support, allocate
922 * two regions. Otherwise, allocate one region of
925 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
927 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
929 add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);
932 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
934 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
937 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
939 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
942 /* set the size of the resource to zero, so that the resource does not
943 * get assigned during required-resource allocation cycle but gets assigned
944 * during the optional-resource allocation cycle.
946 b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
947 b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
/*
 * Depth-first sizing: recurse into subordinate buses first, then size
 * this bridge's own I/O, prefetchable, and non-prefetchable windows.
 */
950 void __ref __pci_bus_size_bridges(struct pci_bus *bus,
951 struct list_head *realloc_head)
954 unsigned long mask, prefmask;
955 resource_size_t additional_mem_size = 0, additional_io_size = 0;
957 list_for_each_entry(dev, &bus->devices, bus_list) {
958 struct pci_bus *b = dev->subordinate;
962 switch (dev->class >> 8) {
963 case PCI_CLASS_BRIDGE_CARDBUS:
964 pci_bus_size_cardbus(b, realloc_head);
967 case PCI_CLASS_BRIDGE_PCI:
969 __pci_bus_size_bridges(b, realloc_head);
/* Now size this bus's own bridge windows (root bus has no bridge). */
978 switch (bus->self->class >> 8) {
979 case PCI_CLASS_BRIDGE_CARDBUS:
980 /* don't size cardbuses yet. */
983 case PCI_CLASS_BRIDGE_PCI:
984 pci_bridge_check_ranges(bus);
/* Hotplug bridges get extra headroom for future device insertion. */
985 if (bus->self->is_hotplug_bridge) {
986 additional_io_size = pci_hotplug_io_size;
987 additional_mem_size = pci_hotplug_mem_size;
993 pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
994 additional_io_size, realloc_head);
995 /* If the bridge supports prefetchable range, size it
996 separately. If it doesn't, or its prefetchable window
997 has already been allocated by arch code, try
998 non-prefetchable range for both types of PCI memory
1000 mask = IORESOURCE_MEM;
1001 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
1002 if (pbus_size_mem(bus, prefmask, prefmask,
1003 realloc_head ? 0 : additional_mem_size,
1004 additional_mem_size, realloc_head))
1005 mask = prefmask; /* Success, size non-prefetch only. */
/* No pref window: double the extra so one window covers both types. */
1007 additional_mem_size += additional_mem_size;
1008 pbus_size_mem(bus, mask, IORESOURCE_MEM,
1009 realloc_head ? 0 : additional_mem_size,
1010 additional_mem_size, realloc_head);
/* Public wrapper: size with no optional-resource tracking. */
1015 void __ref pci_bus_size_bridges(struct pci_bus *bus)
1017 __pci_bus_size_bridges(bus, NULL);
1019 EXPORT_SYMBOL(pci_bus_size_bridges);
/*
 * Assign this bus's resources, then recurse into subordinate buses and
 * program each bridge's windows into hardware on the way back up.
 */
1021 static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
1022 struct list_head *realloc_head,
1023 struct list_head *fail_head)
1026 struct pci_dev *dev;
1028 pbus_assign_resources_sorted(bus, realloc_head, fail_head);
1030 list_for_each_entry(dev, &bus->devices, bus_list) {
1031 b = dev->subordinate;
1035 __pci_bus_assign_resources(b, realloc_head, fail_head);
1037 switch (dev->class >> 8) {
1038 case PCI_CLASS_BRIDGE_PCI:
/* Only reprogram bridges firmware has not already enabled. */
1039 if (!pci_is_enabled(dev))
1040 pci_setup_bridge(b);
1043 case PCI_CLASS_BRIDGE_CARDBUS:
1044 pci_setup_cardbus(b);
1048 dev_info(&dev->dev, "not setting up bridge for bus "
1049 "%04x:%02x\n", pci_domain_nr(b), b->number);
/* Public wrapper: assign with no realloc or failure tracking. */
1055 void __ref pci_bus_assign_resources(const struct pci_bus *bus)
1057 __pci_bus_assign_resources(bus, NULL, NULL);
1059 EXPORT_SYMBOL(pci_bus_assign_resources);
/*
 * Like the bus variant, but starts from a single bridge device: assign
 * the bridge's own resources, then its subordinate bus tree.
 */
1061 static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
1062 struct list_head *add_head,
1063 struct list_head *fail_head)
1067 pdev_assign_resources_sorted((struct pci_dev *)bridge,
1068 add_head, fail_head);
1070 b = bridge->subordinate;
1074 __pci_bus_assign_resources(b, add_head, fail_head);
1076 switch (bridge->class >> 8) {
1077 case PCI_CLASS_BRIDGE_PCI:
1078 pci_setup_bridge(b);
1081 case PCI_CLASS_BRIDGE_CARDBUS:
1082 pci_setup_cardbus(b);
1086 dev_info(&bridge->dev, "not setting up bridge for bus "
1087 "%04x:%02x\n", pci_domain_nr(b), b->number);
/*
 * Release a bridge's window resources of the given @type (and any child
 * resources under them) so a larger window can be allocated later; if
 * anything was released, disable the window in hardware too.
 */
1091 static void pci_bridge_release_resources(struct pci_bus *bus,
1095 bool changed = false;
1096 struct pci_dev *dev;
1098 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1099 IORESOURCE_PREFETCH;
1102 for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
1104 r = &dev->resource[idx];
1105 if ((r->flags & type_mask) != type)
1110 * if there are children under that, we should release them
1113 release_child_resources(r);
1114 if (!release_resource(r)) {
1115 dev_printk(KERN_DEBUG, &dev->dev,
1116 "resource %d %pR released\n", idx, r);
1117 /* keep the old size */
1118 r->end = resource_size(r) - 1;
1126 /* avoiding touch the one without PREF */
1127 if (type & IORESOURCE_PREFETCH)
1128 type = IORESOURCE_PREFETCH;
1129 __pci_setup_bridge(bus, type);
1138 * try to release pci bridge resources that is from leaf bridge,
1139 * so we can allocate big new one later
1141 static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
1143 enum release_type rel_type)
1145 struct pci_dev *dev;
1146 bool is_leaf_bridge = true;
1148 list_for_each_entry(dev, &bus->devices, bus_list) {
1149 struct pci_bus *b = dev->subordinate;
1153 is_leaf_bridge = false;
1155 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
/* whole_subtree releases the entire tree under this bus recursively. */
1158 if (rel_type == whole_subtree)
1159 pci_bus_release_bridge_resources(b, type,
1163 if (pci_is_root_bus(bus))
1166 if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1169 if ((rel_type == whole_subtree) || is_leaf_bridge)
1170 pci_bridge_release_resources(bus, type);
/* Debug dump of the (non-empty) resources attached to one bus. */
1173 static void pci_bus_dump_res(struct pci_bus *bus)
1175 struct resource *res;
1178 pci_bus_for_each_resource(bus, res, i) {
1179 if (!res || !res->end || !res->flags)
1182 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
/* Recursively dump resources of @bus and every subordinate bus. */
1186 static void pci_bus_dump_resources(struct pci_bus *bus)
1189 struct pci_dev *dev;
1192 pci_bus_dump_res(bus);
1194 list_for_each_entry(dev, &bus->devices, bus_list) {
1195 b = dev->subordinate;
1199 pci_bus_dump_resources(b);
/* Recursively compute the bridge-nesting depth below @bus. */
1203 static int __init pci_bus_get_depth(struct pci_bus *bus)
1206 struct pci_dev *dev;
1208 list_for_each_entry(dev, &bus->devices, bus_list) {
1210 struct pci_bus *b = dev->subordinate;
1214 ret = pci_bus_get_depth(b);
1215 if (ret + 1 > depth)
/* Maximum bus depth across all root buses; bounds the realloc retries. */
1221 static int __init pci_get_max_depth(void)
1224 struct pci_bus *bus;
1226 list_for_each_entry(bus, &pci_root_buses, node) {
1229 ret = pci_bus_get_depth(bus);
1239 * first try will not touch pci bridge res
1240 * second and later try will clear small leaf bridge res
1241 * will stop till to the max deepth if can not find good one
/*
 * Top-level boot-time entry: size and assign resources for every root
 * bus, retrying with progressively more aggressive bridge-window
 * release when pci_realloc() was requested and assignments failed.
 */
1244 pci_assign_unassigned_resources(void)
1246 struct pci_bus *bus;
1247 LIST_HEAD(realloc_head); /* list of resources that
1248 want additional resources */
1249 struct list_head *add_list = NULL;
1250 int tried_times = 0;
1251 enum release_type rel_type = leaf_only;
1252 LIST_HEAD(fail_head);
1253 struct pci_dev_resource *dev_res_x;
1254 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1255 IORESOURCE_PREFETCH;
1256 unsigned long failed_type;
1257 int pci_try_num = 1;
1259 /* don't realloc if asked to do so */
1260 if (pci_realloc_enabled()) {
1261 int max_depth = pci_get_max_depth();
1263 pci_try_num = max_depth + 1;
1264 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
1265 max_depth, pci_try_num);
1270 * last try will use add_list, otherwise will try good to have as
1271 * must have, so can realloc parent bridge resource
1273 if (tried_times + 1 == pci_try_num)
1274 add_list = &realloc_head;
1275 /* Depth first, calculate sizes and alignments of all
1276 subordinate buses. */
1277 list_for_each_entry(bus, &pci_root_buses, node)
1278 __pci_bus_size_bridges(bus, add_list);
1280 /* Depth last, allocate resources and update the hardware. */
1281 list_for_each_entry(bus, &pci_root_buses, node)
1282 __pci_bus_assign_resources(bus, add_list, &fail_head);
/* Every optional request must have been consumed by assignment. */
1284 BUG_ON(!list_empty(add_list));
1287 /* any device complain? */
1288 if (list_empty(&fail_head))
1289 goto enable_and_dump;
/* OR together the flags of everything that failed. */
1292 list_for_each_entry(dev_res_x, &fail_head, list)
1293 failed_type |= dev_res_x->flags;
1296 * io port are tight, don't try extra
1297 * or if reach the limit, don't want to try more
1299 failed_type &= type_mask;
1300 if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
1301 free_list(pci_dev_resource, &fail_head);
1302 goto enable_and_dump;
1305 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
1308 /* third times and later will not check if it is leaf */
1309 if ((tried_times + 1) > 2)
1310 rel_type = whole_subtree;
1313 * Try to release leaf bridge's resources that doesn't fit resource of
1314 * child device under that bridge
1316 list_for_each_entry(dev_res_x, &fail_head, list) {
1317 bus = dev_res_x->dev->bus;
1318 pci_bus_release_bridge_resources(bus,
1319 dev_res_x->flags & type_mask,
1322 /* restore size and flags */
1323 list_for_each_entry(dev_res_x, &fail_head, list) {
1324 struct resource *res = dev_res_x->res;
1326 res->start = dev_res_x->start;
1327 res->end = dev_res_x->end;
1328 res->flags = dev_res_x->flags;
1329 if (dev_res_x->dev->subordinate)
1332 free_list(pci_dev_resource, &fail_head);
1337 /* Depth last, update the hardware. */
1338 list_for_each_entry(bus, &pci_root_buses, node)
1339 pci_enable_bridges(bus);
1341 /* dump the resource on buses */
1342 list_for_each_entry(bus, &pci_root_buses, node)
1343 pci_bus_dump_resources(bus);
/*
 * Hotplug entry point: size and assign resources below a single bridge,
 * retrying at most twice with leaf-bridge window release on failure.
 */
1346 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
1348 struct pci_bus *parent = bridge->subordinate;
1349 LIST_HEAD(add_list); /* list of resources that
1350 want additional resources */
1351 int tried_times = 0;
1352 LIST_HEAD(fail_head);
1353 struct pci_dev_resource *dev_res_x;
1355 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1356 IORESOURCE_PREFETCH;
1359 __pci_bus_size_bridges(parent, &add_list);
1360 __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
1361 BUG_ON(!list_empty(&add_list));
1364 if (list_empty(&fail_head))
1367 if (tried_times >= 2) {
1368 /* still fail, don't need to try more */
1369 free_list(pci_dev_resource, &fail_head);
1373 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
1377 * Try to release leaf bridge's resources that doesn't fit resource of
1378 * child device under that bridge
1380 list_for_each_entry(dev_res_x, &fail_head, list) {
1381 struct pci_bus *bus = dev_res_x->dev->bus;
1382 unsigned long flags = dev_res_x->flags;
1384 pci_bus_release_bridge_resources(bus, flags & type_mask,
1387 /* restore size and flags */
1388 list_for_each_entry(dev_res_x, &fail_head, list) {
1389 struct resource *res = dev_res_x->res;
1391 res->start = dev_res_x->start;
1392 res->end = dev_res_x->end;
1393 res->flags = dev_res_x->flags;
1394 if (dev_res_x->dev->subordinate)
1397 free_list(pci_dev_resource, &fail_head);
/* Re-enable the bridge and everything below it once assignment settles. */
1402 retval = pci_reenable_device(bridge);
1403 pci_set_master(bridge);
1404 pci_enable_bridges(parent);
1406 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
1408 #ifdef CONFIG_HOTPLUG
1410 * pci_rescan_bus - scan a PCI bus for devices.
1411 * @bus: PCI bus to scan
1413 * Scan a PCI bus and child buses for new devices, adds them,
1416 * Returns the max number of subordinate bus discovered.
1418 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1421 struct pci_dev *dev;
1422 LIST_HEAD(add_list); /* list of resources that
1423 want additional resources */
1425 max = pci_scan_child_bus(bus);
/* Hold pci_bus_sem for reading while walking the device list. */
1427 down_read(&pci_bus_sem);
1428 list_for_each_entry(dev, &bus->devices, bus_list)
1429 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1430 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1431 if (dev->subordinate)
1432 __pci_bus_size_bridges(dev->subordinate,
1434 up_read(&pci_bus_sem);
1435 __pci_bus_assign_resources(bus, &add_list, NULL);
1436 BUG_ON(!list_empty(&add_list));
1438 pci_enable_bridges(bus);
1439 pci_bus_add_devices(bus);
1443 EXPORT_SYMBOL_GPL(pci_rescan_bus);