Revert "PCI: fix IDE legacy mode resources"
drivers/pci/probe.c (mt8127/android_kernel_alcatel_ttab.git)
/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096

/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

LIST_HEAD(pci_devices);

/*
 * Some device drivers need to know whether PCI has been initialized.
 * We treat PCI as uninitialized as long as the pci_devices list is
 * empty.
 */
int no_pci_devices(void)
{
	return list_empty(&pci_devices);
}

EXPORT_SYMBOL(no_pci_devices);

#ifdef HAVE_PCI_LEGACY
/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis. This routine creates the files and ties them into
 * their associated read, write and mmap handlers from pci-sysfs.c.
 */
static void pci_create_legacy_files(struct pci_bus *b)
{
	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
			       GFP_ATOMIC);
	if (b->legacy_io) {
		b->legacy_io->attr.name = "legacy_io";
		b->legacy_io->size = 0xffff;
		b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_io->read = pci_read_legacy_io;
		b->legacy_io->write = pci_write_legacy_io;
		class_device_create_bin_file(&b->class_dev, b->legacy_io);

		/* Allocated above after the legacy_io struct */
		b->legacy_mem = b->legacy_io + 1;
		b->legacy_mem->attr.name = "legacy_mem";
		b->legacy_mem->size = 1024*1024;
		b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_mem->mmap = pci_mmap_legacy_mem;
		class_device_create_bin_file(&b->class_dev, b->legacy_mem);
	}
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		class_device_remove_bin_file(&b->class_dev, b->legacy_io);
		class_device_remove_bin_file(&b->class_dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#else /* !HAVE_PCI_LEGACY */
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
void pci_remove_legacy_files(struct pci_bus *bus) { return; }
#endif /* HAVE_PCI_LEGACY */

/*
 * PCI Bus Class Devices
 */
static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev,
					char *buf)
{
	int ret;
	cpumask_t cpumask;

	cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
	ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask);
	if (ret < PAGE_SIZE)
		buf[ret++] = '\n';
	return ret;
}
CLASS_DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpuaffinity, NULL);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct class_device *class_dev)
{
	struct pci_bus *pci_bus = to_pci_bus(class_dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name = "pci_bus",
	.release = &release_pcibus_dev,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}

/*
 * Find the extent of a PCI decode..
 */
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent. */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s. */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

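/*
 * Editorial illustration (not part of the original file): a worked example
 * of the sizing arithmetic above, assuming a 1 MB 32-bit memory BAR whose
 * register reads back as 0xfff00000 after writing all 1s.  The numbers are
 * invented for illustration only.
 *
 *	base    = 0xfe000000			(programmed BAR value)
 *	maxbase = 0xfff00000			(read-back after writing ~0)
 *	mask    = PCI_BASE_ADDRESS_MEM_MASK	(drops the low flag bits)
 *
 *	size = mask & maxbase         = 0xfff00000
 *	size = (size & ~(size-1)) - 1 = 0x000fffff	(lowest set bit, minus 1)
 *
 * pci_read_bases() below then sets res->end = res->start + 0xfffff, i.e. a
 * 1 MB decode window.
 */
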
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent. */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s. */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline int is_64bit_memory(u32 mask)
{
	if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
	    (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
		return 1;
	return 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for(pos=0; pos<howmany; pos = next) {
		u64 l64;
		u64 sz64;
		u32 raw_sz;

		next = pos+1;
		res = &dev->resource[pos];
		res->name = pci_name(dev);
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		raw_sz = sz;
		if ((l & PCI_BASE_ADDRESS_SPACE) ==
		    PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
			/*
			 * For 64-bit prefetchable memory, sz can be 0 if the
			 * real size is larger than 4GB, so we also need to
			 * check szhi for that case.
			 */
			if (!is_64bit_memory(l) && !sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if (is_64bit_memory(l)) {
			u32 szhi, lhi;

			pci_read_config_dword(dev, reg+4, &lhi);
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &szhi);
			pci_write_config_dword(dev, reg+4, lhi);
			sz64 = ((u64)szhi << 32) | raw_sz;
			l64 = ((u64)lhi << 32) | l;
			sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
			next++;
#if BITS_PER_LONG == 64
			if (!sz64) {
				res->start = 0;
				res->end = 0;
				res->flags = 0;
				continue;
			}
			res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
			res->end = res->start + sz64;
#else
			if (sz64 > 0x100000000ULL) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit "
					"BAR for device %s\n", pci_name(dev));
				res->start = 0;
				res->flags = 0;
			} else if (lhi) {
				/* 64-bit wide address, treat as disabled */
				pci_write_config_dword(dev, reg,
					l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
				pci_write_config_dword(dev, reg+4, 0);
				res->start = 0;
				res->end = sz;
			}
#endif
		}
	}
	if (rom) {
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = pci_name(dev);
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
			if (sz) {
				res->flags = (l & IORESOURCE_ROM_ENABLE) |
					IORESOURCE_MEM | IORESOURCE_READONLY;
				res->start = l & PCI_ROM_ADDRESS_MASK;
				res->end = res->start + (unsigned long) sz;
			}
		}
	}
}

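/*
 * Editorial illustration (not part of the original file): how the 64-bit
 * path above stitches the two halves of a BAR back together.  Take an
 * invented 8 GB prefetchable 64-bit BAR programmed at 0x2_0000_0000:
 *
 *	l   = 0x0000000c   lhi  = 0x00000002	(low/high dwords as programmed)
 *	sz  = 0x0000000c   szhi = 0xfffffffe	(low/high read-back after ~0)
 *
 *	raw_sz = 0x0000000c
 *	sz64   = ((u64)szhi << 32) | raw_sz = 0xfffffffe0000000c
 *	l64    = ((u64)lhi  << 32) | l      = 0x000000020000000c
 *	pci_size64(l64, sz64, mask)         = 0x1ffffffff	(8 GB - 1)
 *
 * Note that the 32-bit pci_size() call earlier in the loop returns 0 for
 * this BAR (the low dword alone carries no size bits), which is exactly
 * the situation the "check szhi" comment above is warning about.
 */
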
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
			child->resource[i] = child->parent->resource[i - 3];
	}

	for(i=0; i<3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them. If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev));
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}

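/*
 * Editorial illustration (not part of the original file): decoding of a
 * bridge I/O window by the code above.  With invented register values
 * PCI_IO_BASE = 0x20 and PCI_IO_LIMIT = 0x30 (low nibble 0, i.e. 16-bit
 * decode, so the UPPER16 registers are not consulted):
 *
 *	base  = (0x20 & PCI_IO_RANGE_MASK) << 8 = 0x2000
 *	limit = (0x30 & PCI_IO_RANGE_MASK) << 8 = 0x3000
 *	res->start = 0x2000,  res->end = 0x3000 + 0xfff = 0x3fff
 *
 * The registers hold only the upper byte of a 4 KB-granular window, and the
 * limit is inclusive of its last 4 KB block; the memory and prefetchable
 * windows work the same way at 1 MB granularity (hence the + 0xfffff).
 */
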
static struct pci_bus * pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}

static struct pci_bus * __devinit
pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int retval;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->self = bridge;
	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;
	child->bridge = get_device(&bridge->dev);

	child->class_dev.class = &pcibus_class;
	sprintf(child->class_dev.class_id, "%04x:%02x", pci_domain_nr(child), busnr);
	retval = class_device_register(&child->class_dev);
	if (retval)
		goto error_register;
	retval = class_device_create_file(&child->class_dev,
					  &class_device_attr_cpuaffinity);
	if (retval)
		goto error_file_create;

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	/* Set up default resource pointers and names.. */
	for (i = 0; i < 4; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;

error_file_create:
	class_device_unregister(&child->class_dev);
error_register:
	kfree(child);
	return NULL;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_enable_crs(struct pci_dev *dev)
{
	u16 cap, rpctl;
	int rpcap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!rpcap)
		return;

	pci_read_config_word(dev, rpcap + PCI_CAP_FLAGS, &cap);
	if (((cap & PCI_EXP_FLAGS_TYPE) >> 4) != PCI_EXP_TYPE_ROOT_PORT)
		return;

	pci_read_config_word(dev, rpcap + PCI_EXP_RTCTL, &rpctl);
	rpctl |= PCI_EXP_RTCTL_CRSSVE;
	pci_write_config_word(dev, rpcap + PCI_EXP_RTCTL, rpctl);
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

unsigned int pci_scan_child_bus(struct pci_bus *bus);

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);

	pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n",
		 pci_name(dev), buses & 0xffffff, pass);

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (on some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
		unsigned int cmax, busnr;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;
		busnr = (buses >> 8) & 0xFF;

		/*
		 * If we already got to this bus through a different bridge,
		 * ignore it. This can happen with the i450NX chipset.
		 */
		if (pci_find_bus(pci_domain_nr(bus), busnr)) {
			printk(KERN_INFO "PCI: Bus %04x:%02x already known\n",
			       pci_domain_nr(bus), busnr);
			goto out;
		}

		child = pci_add_new_bus(bus, dev, busnr);
		if (!child)
			goto out;
		child->primary = buses & 0xFF;
		child->subordinate = (buses >> 16) & 0xFF;
		child->bridge_ctl = bctl;

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus, which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses())
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged */
		if (pci_find_bus(pci_domain_nr(bus), max+1))
			goto out;
		child = pci_add_new_bus(bus, dev, ++max);
		buses = (buses & 0xff000000)
			| ((unsigned int)(child->primary) << 0)
			| ((unsigned int)(child->secondary) << 8)
			| ((unsigned int)(child->subordinate) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust the subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the BIOS
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * Now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			pr_debug("PCI: Bus #%02x (-#%02x) is %s "
				"hidden behind%s bridge #%02x (-#%02x)\n",
				child->number, child->subordinate,
				(bus->number > child->subordinate &&
				 bus->subordinate < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				bus->number, bus->subordinate);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

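/*
 * Editorial illustration (not part of the original file): the layout of the
 * PCI_PRIMARY_BUS dword that pci_scan_bridge() reads and rewrites above.
 * For an invented bridge with primary bus 0x00, secondary 0x02, subordinate
 * 0x05 and a secondary latency timer of 0x40:
 *
 *	buses = 0x40050200
 *	  bits  7:0   primary bus number       = 0x00
 *	  bits 15:8   secondary bus number     = 0x02
 *	  bits 23:16  subordinate bus number   = 0x05
 *	  bits 31:24  secondary latency timer  = 0x40
 *
 * This is why "buses & 0xffff00" tests whether firmware already assigned
 * secondary/subordinate numbers, and why the CardBus case overwrites only
 * the top byte with CARDBUS_LATENCY_TIMER.
 */
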
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and -1 if the device is of an unknown type (i.e.
 * not normal, bridge or CardBus).
 */
static int pci_setup_device(struct pci_dev * dev)
{
	u32 class;

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	class >>= 8;	/* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev),
		 dev->vendor, dev->device, class, dev->hdr_type);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	class = dev->class >> 8;

	switch (dev->hdr_type) {		/* header type */
	case PCI_HEADER_TYPE_NORMAL:		/* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than in broken
		 * chip-specific quirk code. Legacy mode ATA controllers have
		 * fixed addresses. These are not always echoed in BAR0-3,
		 * and BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = LEGACY_IO_RESOURCE;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = LEGACY_IO_RESOURCE;
			}
			if ((progif & 4) == 0) {
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = LEGACY_IO_RESOURCE;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = LEGACY_IO_RESOURCE;
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		/* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that a subtractive
		   decoding (i.e. transparent) bridge have a programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		/* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				/* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
			pci_name(dev), dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
			pci_name(dev), class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

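/*
 * Editorial illustration (not part of the original file): how the legacy
 * IDE fix-up above reacts to the programming interface byte.  With an
 * invented progif value of 0x80 (bus-master capable, both channels in
 * compatibility mode):
 *
 *	(progif & 1) == 0  ->  resources 0/1 forced to 0x1F0-0x1F7 and 0x3F6
 *	(progif & 4) == 0  ->  resources 2/3 forced to 0x170-0x177 and 0x376
 *
 * A fully native-mode controller (e.g. progif = 0x8f) keeps whatever BARs
 * 0-3 were actually probed by pci_read_bases().
 */
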
/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	kfree(pci_dev);
}

static void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}

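/*
 * Editorial illustration (not part of the original file): the PCI Express
 * Capabilities register decoded by set_pcie_port_type() above, for an
 * invented value reg16 = 0x0042:
 *
 *	bits 3:0  capability version    = 0x2
 *	bits 7:4  device/port type      = 0x4  (PCI_EXP_TYPE_ROOT_PORT)
 *
 * so pdev->pcie_type = (0x0042 & PCI_EXP_FLAGS_TYPE) >> 4 = 4.
 */
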
/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2.0 and PCI Express devices
 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
 * access it. Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge. So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

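/*
 * Editorial illustration (not part of the original file): what the probe
 * above amounts to for a few typical cases.
 *
 *	plain PCI device (no PCIe/PCI-X capability)       -> 256 bytes
 *	PCI-X device without 266/533 MHz mode support     -> 256 bytes
 *	PCIe device whose read at offset 0x100 fails or
 *	returns 0xffffffff                                 -> 256 bytes
 *	PCIe device with a readable dword at 0x100         -> 4096 bytes
 *
 * The returned value ends up in dev->cfg_size (see pci_scan_device() below).
 */
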
static void pci_release_bus_bridge_dev(struct device *dev)
{
	kfree(dev);
}

struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->global_list);
	INIT_LIST_HEAD(&dev->bus_list);

	pci_msi_init_pci_dev(dev);

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev * __devinit
pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;
	u8 hdr_type;
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	/* Configuration request Retry Status */
	while (l == 0xffff0001) {
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
			return NULL;
		/* Card hasn't responded in 60 seconds? Must be stuck. */
		if (delay > 60 * 1000) {
			printk(KERN_WARNING "Device %04x:%02x:%02x.%d not "
				"responding\n", pci_domain_nr(bus),
				bus->number, PCI_SLOT(devfn),
				PCI_FUNC(devfn));
			return NULL;
		}
	}

	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
		return NULL;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->sysdata = bus->sysdata;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;
	dev->cfg_size = pci_cfg_space_size(dev);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it. */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

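/*
 * Editorial illustration (not part of the original file): the Configuration
 * Request Retry Status handling above.  A root port with CRS software
 * visibility enabled (see pci_enable_crs()) returns the special vendor/device
 * value 0xffff0001 while the device is still initialising, so the loop
 * re-reads PCI_VENDOR_ID with an exponentially growing delay:
 *
 *	msleep(1), msleep(2), msleep(4), ... msleep(32768)
 *
 * i.e. roughly 65 seconds of accumulated waiting before "delay" exceeds
 * 60 * 1000 and the function is declared unresponsive.
 */
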
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	INIT_LIST_HEAD(&dev->global_list);
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list. New devices
 * will have an empty dev->global_list head.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	int func, nr = 0;
	int scan_all_fns;

	scan_all_fns = pcibios_scan_all_fns(bus, devfn);

	for (func = 0; func < 8; func++, devfn++) {
		struct pci_dev *dev;

		dev = pci_scan_single_device(bus, devfn);
		if (dev) {
			nr++;

			/*
			 * If this is a single function device,
			 * don't scan past the first function.
			 */
			if (!dev->multifunction) {
				if (func > 0) {
					dev->multifunction = 1;
				} else {
					break;
				}
			}
		} else {
			if (func == 0 && !scan_all_fns)
				break;
		}
	}
	return nr;
}

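/*
 * Editorial illustration (not part of the original file): the devfn
 * encoding the loop above relies on.  A devfn byte packs the slot in bits
 * 7:3 and the function in bits 2:0, so for slot 3:
 *
 *	devfn = PCI_DEVFN(3, 0) = 0x18
 *	PCI_SLOT(0x18) = 3,  PCI_FUNC(0x18) = 0
 *
 * and the "func < 8, devfn++" loop walks 0x18..0x1f, i.e. functions 0-7 of
 * that slot.  pci_scan_child_bus() below steps devfn by 8 for the same
 * reason.
 */
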
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
	pcibios_fixup_bus(bus);
	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
		pci_domain_nr(bus), bus->number, max);
	return max;
}

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);

	/*
	 * Make the discovered devices available.
	 */
	pci_bus_add_devices(bus);

	return max;
}

struct pci_bus * pci_create_bus(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	int error;
	struct pci_bus *b;
	struct device *dev;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev){
		kfree(b);
		return NULL;
	}

	b->sysdata = sysdata;
	b->ops = ops;

	if (pci_find_bus(pci_domain_nr(b), bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
		goto err_out;
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	memset(dev, 0, sizeof(*dev));
	dev->parent = parent;
	dev->release = pci_release_bus_bridge_dev;
	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(dev);
	if (error)
		goto dev_reg_err;
	b->bridge = get_device(dev);

	b->class_dev.class = &pcibus_class;
	sprintf(b->class_dev.class_id, "%04x:%02x", pci_domain_nr(b), bus);
	error = class_device_register(&b->class_dev);
	if (error)
		goto class_dev_reg_err;
	error = class_device_create_file(&b->class_dev, &class_device_attr_cpuaffinity);
	if (error)
		goto class_dev_create_file_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	error = sysfs_create_link(&b->class_dev.kobj, &b->bridge->kobj, "bridge");
	if (error)
		goto sys_create_link_err;

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;

	return b;

sys_create_link_err:
	class_device_remove_file(&b->class_dev, &class_device_attr_cpuaffinity);
class_dev_create_file_err:
	class_device_unregister(&b->class_dev);
class_dev_reg_err:
	device_unregister(dev);
dev_reg_err:
	down_write(&pci_bus_sem);
	list_del(&b->node);
	up_write(&pci_bus_sem);
err_out:
	kfree(dev);
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_bus);

struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b;

	b = pci_create_bus(parent, bus, ops, sysdata);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL(pci_scan_single_device);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif

static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
{
	if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;

	if (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return 1;

	if (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return 1;

	return 0;
}

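/*
 * Editorial illustration (not part of the original file): the ordering
 * produced by pci_sort_bf_cmp() above.  Devices end up sorted by domain,
 * then bus, then devfn, e.g.:
 *
 *	0000:00:00.0  <  0000:00:1f.3  <  0000:01:00.0  <  0001:00:00.0
 *
 * which is what makes the sort "breadth-first": every device on a bus is
 * listed before any device on a higher-numbered bus of the same domain.
 */
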
/*
 * Yes, this forcibly breaks the klist abstraction temporarily. It
 * just wants to sort the klist, not change reference counts and
 * take/drop locks rapidly in the process. It does all this while
 * holding the lock for the list, so objects can't otherwise be
 * added/removed while we're swizzling.
 */
static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
{
	struct list_head *pos;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *b;

	list_for_each(pos, list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		b = to_pci_dev(dev);
		if (pci_sort_bf_cmp(a, b) <= 0) {
			list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
			return;
		}
	}
	list_move_tail(&a->dev.knode_bus.n_node, list);
}

static void __init pci_sort_breadthfirst_klist(void)
{
	LIST_HEAD(sorted_devices);
	struct list_head *pos, *tmp;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *pdev;

	spin_lock(&pci_bus_type.klist_devices.k_lock);
	list_for_each_safe(pos, tmp, &pci_bus_type.klist_devices.k_list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		pdev = to_pci_dev(dev);
		pci_insertion_sort_klist(pdev, &sorted_devices);
	}
	list_splice(&sorted_devices, &pci_bus_type.klist_devices.k_list);
	spin_unlock(&pci_bus_type.klist_devices.k_lock);
}

static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
{
	struct pci_dev *b;

	list_for_each_entry(b, list, global_list) {
		if (pci_sort_bf_cmp(a, b) <= 0) {
			list_move_tail(&a->global_list, &b->global_list);
			return;
		}
	}
	list_move_tail(&a->global_list, list);
}

static void __init pci_sort_breadthfirst_devices(void)
{
	LIST_HEAD(sorted_devices);
	struct pci_dev *dev, *tmp;

	down_write(&pci_bus_sem);
	list_for_each_entry_safe(dev, tmp, &pci_devices, global_list) {
		pci_insertion_sort_devices(dev, &sorted_devices);
	}
	list_splice(&sorted_devices, &pci_devices);
	up_write(&pci_bus_sem);
}

void __init pci_sort_breadthfirst(void)
{
	pci_sort_breadthfirst_devices();
	pci_sort_breadthfirst_klist();
}