arch/powerpc/kernel/pci_32.c
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16
17#include <asm/processor.h>
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/sections.h>
21#include <asm/pci-bridge.h>
22#include <asm/byteorder.h>
23#include <asm/uaccess.h>
24#include <asm/machdep.h>
25
26#undef DEBUG
27
28#ifdef DEBUG
29#define DBG(x...) printk(x)
30#else
31#define DBG(x...)
32#endif
33
34unsigned long isa_io_base = 0;
35unsigned long isa_mem_base = 0;
36unsigned long pci_dram_offset = 0;
37int pcibios_assign_bus_offset = 1;
38
39void pcibios_make_OF_bus_map(void);
40
41static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
42static int probe_resource(struct pci_bus *parent, struct resource *pr,
43 struct resource *res, struct resource **conflict);
44static void update_bridge_base(struct pci_bus *bus, int i);
45static void pcibios_fixup_resources(struct pci_dev* dev);
46static void fixup_broken_pcnet32(struct pci_dev* dev);
47static int reparent_resources(struct resource *parent, struct resource *res);
48static void fixup_cpc710_pci64(struct pci_dev* dev);
49#ifdef CONFIG_PPC_OF
50static u8* pci_to_OF_bus_map;
51#endif
52
53/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs
55 */
56int pci_assign_all_buses;
57
58struct pci_controller* hose_head;
59struct pci_controller** hose_tail = &hose_head;
60
61static int pci_bus_count;
62
63static void
64fixup_broken_pcnet32(struct pci_dev* dev)
65{
66 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
67 dev->vendor = PCI_VENDOR_ID_AMD;
68 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
69 }
70}
71DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
72
73static void
74fixup_cpc710_pci64(struct pci_dev* dev)
75{
76 /* Hide the PCI64 BARs from the kernel as their content doesn't
77 * fit well in the resource management
78 */
79 dev->resource[0].start = dev->resource[0].end = 0;
80 dev->resource[0].flags = 0;
81 dev->resource[1].start = dev->resource[1].end = 0;
82 dev->resource[1].flags = 0;
83}
84DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
85
86static void
87pcibios_fixup_resources(struct pci_dev *dev)
88{
89 struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
90 int i;
91 unsigned long offset;
92
93 if (!hose) {
94 printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
95 return;
96 }
97 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
98 struct resource *res = dev->resource + i;
99 if (!res->flags)
100 continue;
101 if (res->end == 0xffffffff) {
102 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
103 pci_name(dev), i, (u64)res->start, (u64)res->end);
104 res->end -= res->start;
105 res->start = 0;
106 res->flags |= IORESOURCE_UNSET;
107 continue;
108 }
109 offset = 0;
110 if (res->flags & IORESOURCE_MEM) {
111 offset = hose->pci_mem_offset;
112 } else if (res->flags & IORESOURCE_IO) {
113 offset = (unsigned long) hose->io_base_virt
114 - isa_io_base;
115 }
116 if (offset != 0) {
117 res->start += offset;
118 res->end += offset;
119 DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
120 i, res->flags, pci_name(dev),
121 (u64)res->start - offset, (u64)res->start);
122 }
123 }
124
125 /* Call machine specific resource fixup */
126 if (ppc_md.pcibios_fixup_resources)
127 ppc_md.pcibios_fixup_resources(dev);
128}
129DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
130
131void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
132 struct resource *res)
133{
134 unsigned long offset = 0;
135 struct pci_controller *hose = dev->sysdata;
136
137 if (hose && res->flags & IORESOURCE_IO)
138 offset = (unsigned long)hose->io_base_virt - isa_io_base;
139 else if (hose && res->flags & IORESOURCE_MEM)
140 offset = hose->pci_mem_offset;
141 region->start = res->start - offset;
142 region->end = res->end - offset;
143}
144EXPORT_SYMBOL(pcibios_resource_to_bus);
145
146void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
147 struct pci_bus_region *region)
148{
149 unsigned long offset = 0;
150 struct pci_controller *hose = dev->sysdata;
151
152 if (hose && res->flags & IORESOURCE_IO)
153 offset = (unsigned long)hose->io_base_virt - isa_io_base;
154 else if (hose && res->flags & IORESOURCE_MEM)
155 offset = hose->pci_mem_offset;
156 res->start = region->start + offset;
157 res->end = region->end + offset;
158}
159EXPORT_SYMBOL(pcibios_bus_to_resource);
160
161/*
162 * We need to avoid collisions with `mirrored' VGA ports
163 * and other strange ISA hardware, so we always want the
164 * addresses to be allocated in the 0x000-0x0ff region
165 * modulo 0x400.
166 *
167 * Why? Because some silly external IO cards only decode
168 * the low 10 bits of the IO address. The 0x00-0xff region
169 * is reserved for motherboard devices that decode all 16
170 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
171 * but we want to try to avoid allocating at 0x2900-0x2bff
172 * which might be mirrored at 0x0100-0x03ff..
173 */
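/*
 * A rough worked example of the above: a card that decodes only the low
 * 10 bits sees I/O address 0x2914 as 0x2914 & 0x3ff = 0x114, i.e. inside
 * the mirrored 0x0100-0x03ff window, while 0x2814 aliases to 0x014 in the
 * safe 0x000-0x0ff region.  pcibios_align_resource() below enforces this:
 * a start of 0x2940 has (start & 0x300) != 0, so it is rounded up to
 * (0x2940 + 0x3ff) & ~0x3ff = 0x2c00, whose low 10 bits land back in
 * 0x000-0x0ff.
 */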
174void pcibios_align_resource(void *data, struct resource *res,
175 resource_size_t size, resource_size_t align)
176{
177 struct pci_dev *dev = data;
178
179 if (res->flags & IORESOURCE_IO) {
180 resource_size_t start = res->start;
181
182 if (size > 0x100) {
183 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
184 " (%lld bytes)\n", pci_name(dev),
185 dev->resource - res, (unsigned long long)size);
186 }
187
188 if (start & 0x300) {
189 start = (start + 0x3ff) & ~0x3ff;
190 res->start = start;
191 }
192 }
193}
194EXPORT_SYMBOL(pcibios_align_resource);
195
196/*
197 * Handle resources of PCI devices. If the world were perfect, we could
198 * just allocate all the resource regions and do nothing more. It isn't.
199 * On the other hand, we cannot just re-allocate all devices, as it would
200 * require us to know lots of host bridge internals. So we attempt to
201 * keep as much of the original configuration as possible, but tweak it
202 * when it's found to be wrong.
203 *
204 * Known BIOS problems we have to work around:
205 * - I/O or memory regions not configured
206 * - regions configured, but not enabled in the command register
207 * - bogus I/O addresses above 64K used
208 * - expansion ROMs left enabled (this may sound harmless, but given
209 * the fact the PCI specs explicitly allow address decoders to be
210 * shared between expansion ROMs and other resource regions, it's
211 * at least dangerous)
212 *
213 * Our solution:
214 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
215 * This gives us fixed barriers on where we can allocate.
216 * (2) Allocate resources for all enabled devices. If there is
217 * a collision, just mark the resource as unallocated. Also
218 * disable expansion ROMs during this step.
219 * (3) Try to allocate resources for disabled devices. If the
220 * resources were assigned correctly, everything goes well,
221 * if they weren't, they won't disturb allocation of other
222 * resources.
223 * (4) Assign new addresses to resources which were either
224 * not configured at all or misconfigured. If explicitly
225 * requested by the user, configure expansion ROM address
226 * as well.
227 */
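/*
 * In this file those four steps map onto pcibios_allocate_bus_resources(),
 * the two pcibios_allocate_resources() passes (pass 0 handles regions that
 * are enabled in PCI_COMMAND, pass 1 the disabled ones; expansion ROMs are
 * switched off during pass 0) and pcibios_assign_resources(), all driven
 * from pcibios_init() below.
 */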
228
229static void __init
230pcibios_allocate_bus_resources(struct list_head *bus_list)
231{
232 struct pci_bus *bus;
233 int i;
234 struct resource *res, *pr;
235
236 /* Depth-First Search on bus tree */
237 list_for_each_entry(bus, bus_list, node) {
238 for (i = 0; i < 4; ++i) {
239 if ((res = bus->resource[i]) == NULL || !res->flags
240 || res->start > res->end)
241 continue;
242 if (bus->parent == NULL)
243 pr = (res->flags & IORESOURCE_IO)?
244 &ioport_resource: &iomem_resource;
245 else {
246 pr = pci_find_parent_resource(bus->self, res);
247 if (pr == res) {
248 /* this happens when the generic PCI
249 * code (wrongly) decides that this
250 * bridge is transparent -- paulus
251 */
252 continue;
253 }
254 }
255
256 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
257 (u64)res->start, (u64)res->end, res->flags, pr);
258 if (pr) {
259 if (request_resource(pr, res) == 0)
260 continue;
261 /*
262 * Must be a conflict with an existing entry.
263 * Move that entry (or entries) under the
264 * bridge resource and try again.
265 */
266 if (reparent_resources(pr, res) == 0)
267 continue;
268 }
269 printk(KERN_ERR "PCI: Cannot allocate resource region "
270 "%d of PCI bridge %d\n", i, bus->number);
271 if (pci_relocate_bridge_resource(bus, i))
272 bus->resource[i] = NULL;
273 }
274 pcibios_allocate_bus_resources(&bus->children);
275 }
276}
277
278/*
279 * Reparent resource children of pr that conflict with res
280 * under res, and make res replace those children.
281 */
282static int __init
283reparent_resources(struct resource *parent, struct resource *res)
284{
285 struct resource *p, **pp;
286 struct resource **firstpp = NULL;
287
288 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
289 if (p->end < res->start)
290 continue;
291 if (res->end < p->start)
292 break;
293 if (p->start < res->start || p->end > res->end)
294 return -1; /* not completely contained */
295 if (firstpp == NULL)
296 firstpp = pp;
297 }
298 if (firstpp == NULL)
299 return -1; /* didn't find any conflicting entries? */
300 res->parent = parent;
301 res->child = *firstpp;
302 res->sibling = *pp;
303 *firstpp = res;
304 *pp = NULL;
305 for (p = res->child; p != NULL; p = p->sibling) {
306 p->parent = res;
307 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
308 p->name, (u64)p->start, (u64)p->end, res->name);
309 }
310 return 0;
311}
312
313/*
314 * A bridge has been allocated a range which is outside the range
315 * of its parent bridge, so it needs to be moved.
316 */
317static int __init
318pci_relocate_bridge_resource(struct pci_bus *bus, int i)
319{
320 struct resource *res, *pr, *conflict;
321 unsigned long try, size;
322 int j;
323 struct pci_bus *parent = bus->parent;
324
325 if (parent == NULL) {
326 /* shouldn't ever happen */
327 printk(KERN_ERR "PCI: can't move host bridge resource\n");
328 return -1;
329 }
330 res = bus->resource[i];
331 if (res == NULL)
332 return -1;
333 pr = NULL;
334 for (j = 0; j < 4; j++) {
335 struct resource *r = parent->resource[j];
336 if (!r)
337 continue;
338 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
339 continue;
340 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
341 pr = r;
342 break;
343 }
344 if (res->flags & IORESOURCE_PREFETCH)
345 pr = r;
346 }
347 if (pr == NULL)
348 return -1;
349 size = res->end - res->start;
350 if (pr->start > pr->end || size > pr->end - pr->start)
351 return -1;
352 try = pr->end;
353 for (;;) {
354 res->start = try - size;
355 res->end = try;
356 if (probe_resource(bus->parent, pr, res, &conflict) == 0)
357 break;
358 if (conflict->start <= pr->start + size)
359 return -1;
360 try = conflict->start - 1;
361 }
362 if (request_resource(pr, res)) {
363 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
364 (u64)res->start, (u64)res->end);
365 return -1; /* "can't happen" */
366 }
367 update_bridge_base(bus, i);
368 printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
369 bus->number, i, (unsigned long long)res->start,
370 (unsigned long long)res->end);
371 return 0;
372}
373
374static int __init
375probe_resource(struct pci_bus *parent, struct resource *pr,
376 struct resource *res, struct resource **conflict)
377{
378 struct pci_bus *bus;
379 struct pci_dev *dev;
380 struct resource *r;
381 int i;
382
383 for (r = pr->child; r != NULL; r = r->sibling) {
384 if (r->end >= res->start && res->end >= r->start) {
385 *conflict = r;
386 return 1;
387 }
388 }
389 list_for_each_entry(bus, &parent->children, node) {
390 for (i = 0; i < 4; ++i) {
391 if ((r = bus->resource[i]) == NULL)
392 continue;
393 if (!r->flags || r->start > r->end || r == res)
394 continue;
395 if (pci_find_parent_resource(bus->self, r) != pr)
396 continue;
397 if (r->end >= res->start && res->end >= r->start) {
398 *conflict = r;
399 return 1;
400 }
401 }
402 }
403 list_for_each_entry(dev, &parent->devices, bus_list) {
404 for (i = 0; i < 6; ++i) {
405 r = &dev->resource[i];
406 if (!r->flags || (r->flags & IORESOURCE_UNSET))
407 continue;
408 if (pci_find_parent_resource(dev, r) != pr)
409 continue;
410 if (r->end >= res->start && res->end >= r->start) {
411 *conflict = r;
412 return 1;
413 }
414 }
415 }
416 return 0;
417}
418
419static void __init
420update_bridge_base(struct pci_bus *bus, int i)
421{
422 struct resource *res = bus->resource[i];
423 u8 io_base_lo, io_limit_lo;
424 u16 mem_base, mem_limit;
425 u16 cmd;
426 unsigned long start, end, off;
427 struct pci_dev *dev = bus->self;
428 struct pci_controller *hose = dev->sysdata;
429
430 if (!hose) {
431 printk("update_bridge_base: no hose?\n");
432 return;
433 }
434 pci_read_config_word(dev, PCI_COMMAND, &cmd);
435 pci_write_config_word(dev, PCI_COMMAND,
436 cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
437 if (res->flags & IORESOURCE_IO) {
438 off = (unsigned long) hose->io_base_virt - isa_io_base;
439 start = res->start - off;
440 end = res->end - off;
441 io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
442 io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
443 if (end > 0xffff)
444 io_base_lo |= PCI_IO_RANGE_TYPE_32;
445 else
446 io_base_lo |= PCI_IO_RANGE_TYPE_16;
447 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
448 start >> 16);
449 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
450 end >> 16);
451 pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
452 pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
453
454 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
455 == IORESOURCE_MEM) {
456 off = hose->pci_mem_offset;
457 mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
458 mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
459 pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
460 pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
461
462 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
463 == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
464 off = hose->pci_mem_offset;
465 mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
466 mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
467 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
468 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
469
470 } else {
471 DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
472 pci_name(dev), i, res->flags);
473 }
474 pci_write_config_word(dev, PCI_COMMAND, cmd);
475}
476
477static inline void alloc_resource(struct pci_dev *dev, int idx)
478{
479 struct resource *pr, *r = &dev->resource[idx];
480
481 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
482 pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
483 pr = pci_find_parent_resource(dev, r);
484 if (!pr || request_resource(pr, r) < 0) {
485 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
486 " of device %s\n", idx, pci_name(dev));
487 if (pr)
488 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
489 pr, (u64)pr->start, (u64)pr->end, pr->flags);
490 /* We'll assign a new address later */
491 r->flags |= IORESOURCE_UNSET;
492 r->end -= r->start;
493 r->start = 0;
494 }
495}
496
497static void __init
498pcibios_allocate_resources(int pass)
499{
500 struct pci_dev *dev = NULL;
501 int idx, disabled;
502 u16 command;
503 struct resource *r;
504
505 for_each_pci_dev(dev) {
506 pci_read_config_word(dev, PCI_COMMAND, &command);
507 for (idx = 0; idx < 6; idx++) {
508 r = &dev->resource[idx];
509 if (r->parent) /* Already allocated */
510 continue;
511 if (!r->flags || (r->flags & IORESOURCE_UNSET))
512 continue; /* Not assigned at all */
513 if (r->flags & IORESOURCE_IO)
514 disabled = !(command & PCI_COMMAND_IO);
515 else
516 disabled = !(command & PCI_COMMAND_MEMORY);
517 if (pass == disabled)
518 alloc_resource(dev, idx);
519 }
520 if (pass)
521 continue;
522 r = &dev->resource[PCI_ROM_RESOURCE];
523 if (r->flags & IORESOURCE_ROM_ENABLE) {
524 /* Turn the ROM off, leave the resource region, but keep it unregistered. */
525 u32 reg;
526 DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
527 r->flags &= ~IORESOURCE_ROM_ENABLE;
528 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
529 pci_write_config_dword(dev, dev->rom_base_reg,
530 reg & ~PCI_ROM_ADDRESS_ENABLE);
531 }
532 }
533}
534
535static void __init
536pcibios_assign_resources(void)
537{
538 struct pci_dev *dev = NULL;
539 int idx;
540 struct resource *r;
541
542 for_each_pci_dev(dev) {
543 int class = dev->class >> 8;
544
545 /* Don't touch classless devices and host bridges */
546 if (!class || class == PCI_CLASS_BRIDGE_HOST)
547 continue;
548
549 for (idx = 0; idx < 6; idx++) {
550 r = &dev->resource[idx];
551
552 /*
553 * We shall assign a new address to this resource,
554 * either because the BIOS (sic) forgot to do so
555 * or because we have decided the old address was
556 * unusable for some reason.
557 */
558 if ((r->flags & IORESOURCE_UNSET) && r->end &&
559 (!ppc_md.pcibios_enable_device_hook ||
560 !ppc_md.pcibios_enable_device_hook(dev, 1))) {
561 r->flags &= ~IORESOURCE_UNSET;
562 pci_assign_resource(dev, idx);
563 }
564 }
565
566#if 0 /* don't assign ROMs */
567 r = &dev->resource[PCI_ROM_RESOURCE];
568 r->end -= r->start;
569 r->start = 0;
570 if (r->end)
571 pci_assign_resource(dev, PCI_ROM_RESOURCE);
572#endif
573 }
574}
575
576
577int
578pcibios_enable_resources(struct pci_dev *dev, int mask)
579{
580 u16 cmd, old_cmd;
581 int idx;
582 struct resource *r;
583
584 pci_read_config_word(dev, PCI_COMMAND, &cmd);
585 old_cmd = cmd;
586 for (idx=0; idx<6; idx++) {
587 /* Only set up the requested stuff */
588 if (!(mask & (1<<idx)))
589 continue;
590
591 r = &dev->resource[idx];
592 if (r->flags & IORESOURCE_UNSET) {
593 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
594 return -EINVAL;
595 }
596 if (r->flags & IORESOURCE_IO)
597 cmd |= PCI_COMMAND_IO;
598 if (r->flags & IORESOURCE_MEM)
599 cmd |= PCI_COMMAND_MEMORY;
600 }
601 if (dev->resource[PCI_ROM_RESOURCE].start)
602 cmd |= PCI_COMMAND_MEMORY;
603 if (cmd != old_cmd) {
604 printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
605 pci_write_config_word(dev, PCI_COMMAND, cmd);
606 }
607 return 0;
608}
609
610static int next_controller_index;
611
612struct pci_controller * __init
613pcibios_alloc_controller(void)
614{
615 struct pci_controller *hose;
616
617 hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
618 memset(hose, 0, sizeof(struct pci_controller));
619
620 *hose_tail = hose;
621 hose_tail = &hose->next;
622
623 hose->index = next_controller_index++;
624
625 return hose;
626}
627
628#ifdef CONFIG_PPC_OF
629/*
630 * Functions below are used on OpenFirmware machines.
631 */
632static void
633make_one_node_map(struct device_node* node, u8 pci_bus)
634{
635 const int *bus_range;
636 int len;
637
638 if (pci_bus >= pci_bus_count)
639 return;
640 bus_range = get_property(node, "bus-range", &len);
641 if (bus_range == NULL || len < 2 * sizeof(int)) {
642 printk(KERN_WARNING "Can't get bus-range for %s, "
643 "assuming it starts at 0\n", node->full_name);
644 pci_to_OF_bus_map[pci_bus] = 0;
645 } else
646 pci_to_OF_bus_map[pci_bus] = bus_range[0];
647
648 for (node=node->child; node != 0;node = node->sibling) {
649 struct pci_dev* dev;
650 const unsigned int *class_code, *reg;
651
652 class_code = get_property(node, "class-code", NULL);
653 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
654 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
655 continue;
656 reg = get_property(node, "reg", NULL);
657 if (!reg)
658 continue;
659 dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
660 if (!dev || !dev->subordinate)
661 continue;
662 make_one_node_map(node, dev->subordinate->number);
663 }
664}
665
666void
667pcibios_make_OF_bus_map(void)
668{
669 int i;
670 struct pci_controller* hose;
671 struct property *map_prop;
672
673 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
674 if (!pci_to_OF_bus_map) {
675 printk(KERN_ERR "Can't allocate OF bus map !\n");
676 return;
677 }
678
679 /* We fill the bus map with invalid values, which helps
680 * debugging.
681 */
682 for (i=0; i<pci_bus_count; i++)
683 pci_to_OF_bus_map[i] = 0xff;
684
685 /* For each hose, we begin searching bridges */
686 for(hose=hose_head; hose; hose=hose->next) {
687 struct device_node* node;
688 node = (struct device_node *)hose->arch_data;
689 if (!node)
690 continue;
691 make_one_node_map(node, hose->first_busno);
692 }
693 map_prop = of_find_property(find_path_device("/"),
694 "pci-OF-bus-map", NULL);
695 if (map_prop) {
696 BUG_ON(pci_bus_count > map_prop->length);
697 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
698 }
699#ifdef DEBUG
700 printk("PCI->OF bus map:\n");
701 for (i=0; i<pci_bus_count; i++) {
702 if (pci_to_OF_bus_map[i] == 0xff)
703 continue;
704 printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
705 }
706#endif
707}
708
709typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
710
711static struct device_node*
712scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
713{
714 struct device_node* sub_node;
715
716 for (; node != 0;node = node->sibling) {
717 const unsigned int *class_code;
718
719 if (filter(node, data))
720 return node;
721
722 /* For PCI<->PCI bridges or CardBus bridges, we go down
723 * Note: some OFs create a parent node "multifunc-device" as
724 * a fake root for all functions of a multi-function device,
725 * we go down them as well.
726 */
727 class_code = get_property(node, "class-code", NULL);
728 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
729 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
730 strcmp(node->name, "multifunc-device"))
731 continue;
732 sub_node = scan_OF_pci_childs(node->child, filter, data);
733 if (sub_node)
734 return sub_node;
735 }
736 return NULL;
737}
738
739static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
740 unsigned int devfn)
741{
742 struct device_node *np = NULL;
743 const u32 *reg;
744 unsigned int psize;
745
746 while ((np = of_get_next_child(parent, np)) != NULL) {
747 reg = get_property(np, "reg", &psize);
748 if (reg == NULL || psize < 4)
749 continue;
750 if (((reg[0] >> 8) & 0xff) == devfn)
751 return np;
752 }
753 return NULL;
754}
755
756
757static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
758{
759 struct device_node *parent, *np;
760
761 /* Are we a root bus ? */
762 if (bus->self == NULL || bus->parent == NULL) {
763 struct pci_controller *hose = pci_bus_to_hose(bus->number);
764 if (hose == NULL)
765 return NULL;
766 return of_node_get(hose->arch_data);
767 }
768
769 /* not a root bus, we need to get our parent */
770 parent = scan_OF_for_pci_bus(bus->parent);
771 if (parent == NULL)
772 return NULL;
773
774 /* now iterate for children for a match */
775 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
776 of_node_put(parent);
777
778 /* sanity check */
779 if (strcmp(np->type, "pci") != 0)
780 printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
781 np->type, np->full_name);
782
783 return np;
784}
785
786/*
787 * Scans the OF tree for a device node matching a PCI device
788 */
789struct device_node *
790pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
791{
792 struct device_node *parent, *np;
793
794 if (!have_of)
795 return NULL;
796
797 DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
798 parent = scan_OF_for_pci_bus(bus);
799 if (parent == NULL)
800 return NULL;
801 DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
802 np = scan_OF_for_pci_dev(parent, devfn);
803 of_node_put(parent);
804 DBG(" result is %s\n", np ? np->full_name : "<NULL>");
805
806 /* XXX most callers don't release the returned node
807 * mostly because ppc64 doesn't increase the refcount,
808 * we need to fix that.
809 */
810 return np;
811}
812EXPORT_SYMBOL(pci_busdev_to_OF_node);
813
814struct device_node*
815pci_device_to_OF_node(struct pci_dev *dev)
816{
817 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
818}
819EXPORT_SYMBOL(pci_device_to_OF_node);
820
821/* This routine is meant to be used early during boot, when the
822 * PCI bus numbers have not yet been assigned, and you need to
823 * issue PCI config cycles to an OF device.
824 * It could also be used to "fix" RTAS config cycles if you want
825 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
826 * config cycles.
827 */
828struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
829{
830 if (!have_of)
831 return NULL;
832 while(node) {
833 struct pci_controller* hose;
834 for (hose=hose_head;hose;hose=hose->next)
835 if (hose->arch_data == node)
836 return hose;
837 node=node->parent;
838 }
839 return NULL;
840}
841
842static int
843find_OF_pci_device_filter(struct device_node* node, void* data)
844{
845 return ((void *)node == data);
846}
847
848/*
849 * Returns the PCI device matching a given OF node
850 */
851int
852pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
853{
854 const unsigned int *reg;
855 struct pci_controller* hose;
856 struct pci_dev* dev = NULL;
857
858 if (!have_of)
859 return -ENODEV;
860 /* Make sure it's really a PCI device */
861 hose = pci_find_hose_for_OF_device(node);
862 if (!hose || !hose->arch_data)
863 return -ENODEV;
864 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
865 find_OF_pci_device_filter, (void *)node))
866 return -ENODEV;
867 reg = get_property(node, "reg", NULL);
868 if (!reg)
869 return -ENODEV;
870 *bus = (reg[0] >> 16) & 0xff;
871 *devfn = ((reg[0] >> 8) & 0xff);
872
873 /* Ok, here we need some tweak. If we have already renumbered
874 * all busses, we can't rely on the OF bus number any more.
875 * the pci_to_OF_bus_map is not enough as several PCI busses
876 * may match the same OF bus number.
877 */
878 if (!pci_to_OF_bus_map)
879 return 0;
880
881 for_each_pci_dev(dev)
882 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
883 dev->devfn == *devfn) {
884 *bus = dev->bus->number;
885 pci_dev_put(dev);
886 return 0;
887 }
888
889 return -ENODEV;
890}
891EXPORT_SYMBOL(pci_device_from_OF_node);
892
893void __init
894pci_process_bridge_OF_ranges(struct pci_controller *hose,
895 struct device_node *dev, int primary)
896{
897 static unsigned int static_lc_ranges[256] __initdata;
898 const unsigned int *dt_ranges;
899 unsigned int *lc_ranges, *ranges, *prev, size;
900 int rlen = 0, orig_rlen;
901 int memno = 0;
902 struct resource *res;
903 int np, na = prom_n_addr_cells(dev);
904 np = na + 5;
905
906 /* First we try to merge ranges to fix a problem with some pmacs
907 * that can have more than 3 ranges, fortunately using contiguous
908 * addresses -- BenH
909 */
910 dt_ranges = get_property(dev, "ranges", &rlen);
911 if (!dt_ranges)
912 return;
913 /* Sanity check, though hopefully that never happens */
914 if (rlen > sizeof(static_lc_ranges)) {
915 printk(KERN_WARNING "OF ranges property too large !\n");
916 rlen = sizeof(static_lc_ranges);
917 }
918 lc_ranges = static_lc_ranges;
919 memcpy(lc_ranges, dt_ranges, rlen);
920 orig_rlen = rlen;
921
922 /* Let's work on a copy of the "ranges" property instead of damaging
923 * the device-tree image in memory
924 */
925 ranges = lc_ranges;
926 prev = NULL;
927 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
928 if (prev) {
929 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
930 (prev[2] + prev[na+4]) == ranges[2] &&
931 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
932 prev[na+4] += ranges[na+4];
933 ranges[0] = 0;
934 ranges += np;
935 continue;
936 }
937 }
938 prev = ranges;
939 ranges += np;
940 }
941
942 /*
943 * The ranges property is laid out as an array of elements,
944 * each of which comprises:
945 * cells 0 - 2: a PCI address
946 * cells 3 or 3+4: a CPU physical address
947 * (size depending on dev->n_addr_cells)
948 * cells 4+5 or 5+6: the size of the range
949 */
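/*
 * Illustrative example (assuming one CPU address cell, i.e. np == 6): an
 * I/O entry such as
 *	0x01000000 0x00000000 0x00000000  0xf2000000  0x00000000 0x00800000
 * would decode as PCI address 0 in I/O space (space code 1 in bits 24-25
 * of the first cell), CPU physical base 0xf2000000 and size 0x00800000.
 * The switch below keys off that space code, and bit 30 of the first cell
 * (0x40000000) marks a prefetchable memory range.
 */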
950 ranges = lc_ranges;
951 rlen = orig_rlen;
952 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
953 res = NULL;
954 size = ranges[na+4];
955 switch ((ranges[0] >> 24) & 0x3) {
956 case 1: /* I/O space */
957 if (ranges[2] != 0)
958 break;
959 hose->io_base_phys = ranges[na+2];
960 /* limit I/O space to 16MB */
961 if (size > 0x01000000)
962 size = 0x01000000;
963 hose->io_base_virt = ioremap(ranges[na+2], size);
964 if (primary)
965 isa_io_base = (unsigned long) hose->io_base_virt;
966 res = &hose->io_resource;
967 res->flags = IORESOURCE_IO;
968 res->start = ranges[2];
969 DBG("PCI: IO 0x%llx -> 0x%llx\n",
970 (u64)res->start, (u64)res->start + size - 1);
971 break;
972 case 2: /* memory space */
973 memno = 0;
974 if (ranges[1] == 0 && ranges[2] == 0
975 && ranges[na+4] <= (16 << 20)) {
976 /* 1st 16MB, i.e. ISA memory area */
977 if (primary)
978 isa_mem_base = ranges[na+2];
979 memno = 1;
980 }
981 while (memno < 3 && hose->mem_resources[memno].flags)
982 ++memno;
983 if (memno == 0)
984 hose->pci_mem_offset = ranges[na+2] - ranges[2];
985 if (memno < 3) {
986 res = &hose->mem_resources[memno];
987 res->flags = IORESOURCE_MEM;
988 if(ranges[0] & 0x40000000)
989 res->flags |= IORESOURCE_PREFETCH;
990 res->start = ranges[na+2];
991 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
992 (u64)res->start, (u64)res->start + size - 1);
993 }
994 break;
995 }
996 if (res != NULL) {
997 res->name = dev->full_name;
998 res->end = res->start + size - 1;
999 res->parent = NULL;
1000 res->sibling = NULL;
1001 res->child = NULL;
1002 }
1003 ranges += np;
1004 }
1005}
1006
1007/* We create the "pci-OF-bus-map" property now so it appears in the
1008 * /proc device tree
1009 */
1010void __init
1011pci_create_OF_bus_map(void)
1012{
1013 struct property* of_prop;
1014
1015 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1016 if (of_prop && find_path_device("/")) {
1017 memset(of_prop, -1, sizeof(struct property) + 256);
1018 of_prop->name = "pci-OF-bus-map";
1019 of_prop->length = 256;
1020 of_prop->value = (unsigned char *)&of_prop[1];
1021 prom_add_property(find_path_device("/"), of_prop);
1022 }
1023}
1024
1025static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1026{
1027 struct pci_dev *pdev;
1028 struct device_node *np;
1029
1030 pdev = to_pci_dev (dev);
1031 np = pci_device_to_OF_node(pdev);
1032 if (np == NULL || np->full_name == NULL)
1033 return 0;
1034 return sprintf(buf, "%s", np->full_name);
1035}
1036static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1037
1038#else /* CONFIG_PPC_OF */
1039void pcibios_make_OF_bus_map(void)
1040{
1041}
1042#endif /* CONFIG_PPC_OF */
1043
1044/* Add sysfs properties */
1045void pcibios_add_platform_entries(struct pci_dev *pdev)
1046{
1047#ifdef CONFIG_PPC_OF
1048 device_create_file(&pdev->dev, &dev_attr_devspec);
1049#endif /* CONFIG_PPC_OF */
1050}
1051
1052
1053#ifdef CONFIG_PPC_PMAC
1054/*
1055 * This set of routines checks for PCI<->PCI bridges that have closed
1056 * IO resources and have child devices. It tries to re-open an IO
1057 * window on them.
1058 *
1059 * This is a _temporary_ fix to workaround a problem with Apple's OF
1060 * closing IO windows on P2P bridges when the OF drivers of cards
1061 * below this bridge don't claim any IO range (typically ATI or
1062 * Adaptec).
1063 *
1064 * A more complete fix would be to use drivers/pci/setup-bus.c, which
1065 * involves a working pcibios_fixup_pbus_ranges(), some more care about
1066 * ordering when creating the host bus resources, and maybe a few more
1067 * minor tweaks
1068 */
1069
1070/* Initialize bridges with base/limit values we have collected */
1071static void __init
1072do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1073{
1074 struct pci_dev *bridge = bus->self;
1075 struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1076 u32 l;
1077 u16 w;
1078 struct resource res;
1079
1080 if (bus->resource[0] == NULL)
1081 return;
1082 res = *(bus->resource[0]);
1083
1084 DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1085 res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1086 res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1087 DBG(" IO window: %016llx-%016llx\n", res.start, res.end);
1088
1089 /* Set up the top and bottom of the PCI I/O segment for this bus. */
1090 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1091 l &= 0xffff000f;
1092 l |= (res.start >> 8) & 0x00f0;
1093 l |= res.end & 0xf000;
1094 pci_write_config_dword(bridge, PCI_IO_BASE, l);
1095
1096 if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1097 l = (res.start >> 16) | (res.end & 0xffff0000);
1098 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1099 }
1100
1101 pci_read_config_word(bridge, PCI_COMMAND, &w);
1102 w |= PCI_COMMAND_IO;
1103 pci_write_config_word(bridge, PCI_COMMAND, w);
1104
1105#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1106 if (enable_vga) {
1107 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1108 w |= PCI_BRIDGE_CTL_VGA;
1109 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1110 }
1111#endif
1112}
1113
1114/* This function is pretty basic and actually quite broken for the
1115 * general case, it's enough for us right now though. It's supposed
1116 * to tell us if we need to open an IO range at all or not and what
1117 * size.
1118 */
1119static int __init
1120check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1121{
1122 struct pci_dev *dev;
1123 int i;
1124 int rc = 0;
1125
1126#define push_end(res, mask) do { \
1127 BUG_ON((mask+1) & mask); \
1128 res->end = (res->end + mask) | mask; \
1129} while (0)
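/*
 * Quick worked example: push_end() advances res->end by `mask' and then
 * rounds up so that end + 1 is a multiple of mask + 1.  With
 * res->end == 0x2400, push_end(res, 0xfff) yields
 * (0x2400 + 0xfff) | 0xfff = 0x3fff.  The BUG_ON() asserts that mask + 1
 * is a power of two, which the OR trick relies on.
 */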
1130
1131 list_for_each_entry(dev, &bus->devices, bus_list) {
1132 u16 class = dev->class >> 8;
1133
1134 if (class == PCI_CLASS_DISPLAY_VGA ||
1135 class == PCI_CLASS_NOT_DEFINED_VGA)
1136 *found_vga = 1;
1137 if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1138 rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1139 if (class == PCI_CLASS_BRIDGE_CARDBUS)
1140 push_end(res, 0xfff);
1141
1142 for (i=0; i<PCI_NUM_RESOURCES; i++) {
1143 struct resource *r;
1144 unsigned long r_size;
1145
1146 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1147 && i >= PCI_BRIDGE_RESOURCES)
1148 continue;
1149 r = &dev->resource[i];
1150 r_size = r->end - r->start;
1151 if (r_size < 0xfff)
1152 r_size = 0xfff;
1153 if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1154 rc = 1;
1155 push_end(res, r_size);
1156 }
1157 }
1158 }
1159
1160 return rc;
1161}
1162
1163/* Here we scan all P2P bridges of a given level that have a closed
1164 * IO window. Note that the test for the presence of a VGA card should
1165 * be improved to take into account already configured P2P bridges,
1166 * currently, we don't see them and might end up configuring 2 bridges
1167 * with VGA pass through enabled
1168 */
1169static void __init
1170do_fixup_p2p_level(struct pci_bus *bus)
1171{
1172 struct pci_bus *b;
1173 int i, parent_io;
1174 int has_vga = 0;
1175
1176 for (parent_io=0; parent_io<4; parent_io++)
1177 if (bus->resource[parent_io]
1178 && bus->resource[parent_io]->flags & IORESOURCE_IO)
1179 break;
1180 if (parent_io >= 4)
1181 return;
1182
1183 list_for_each_entry(b, &bus->children, node) {
1184 struct pci_dev *d = b->self;
1185 struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1186 struct resource *res = b->resource[0];
1187 struct resource tmp_res;
1188 unsigned long max;
1189 int found_vga = 0;
1190
1191 memset(&tmp_res, 0, sizeof(tmp_res));
1192 tmp_res.start = bus->resource[parent_io]->start;
1193
1194 /* We don't let low addresses go through that closed P2P bridge, well,
1195 * that may not be necessary but I feel safer that way
1196 */
1197 if (tmp_res.start == 0)
1198 tmp_res.start = 0x1000;
1199
1200 if (!list_empty(&b->devices) && res && res->flags == 0 &&
1201 res != bus->resource[parent_io] &&
1202 (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1203 check_for_io_childs(b, &tmp_res, &found_vga)) {
1204 u8 io_base_lo;
1205
1206 printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1207
1208 if (found_vga) {
1209 if (has_vga) {
1210 printk(KERN_WARNING "Skipping VGA, already active"
1211 " on bus segment\n");
1212 found_vga = 0;
1213 } else
1214 has_vga = 1;
1215 }
1216 pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1217
1218 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1219 max = ((unsigned long) hose->io_base_virt
1220 - isa_io_base) + 0xffffffff;
1221 else
1222 max = ((unsigned long) hose->io_base_virt
1223 - isa_io_base) + 0xffff;
1224
1225 *res = tmp_res;
1226 res->flags = IORESOURCE_IO;
1227 res->name = b->name;
1228
1229 /* Find a resource in the parent where we can allocate */
1230 for (i = 0 ; i < 4; i++) {
1231 struct resource *r = bus->resource[i];
1232 if (!r)
1233 continue;
1234 if ((r->flags & IORESOURCE_IO) == 0)
1235 continue;
1236 DBG("Trying to allocate from %016llx, size %016llx from parent"
1237 " res %d: %016llx -> %016llx\n",
1238 res->start, res->end, i, r->start, r->end);
1239
1240 if (allocate_resource(r, res, res->end + 1, res->start, max,
1241 res->end + 1, NULL, NULL) < 0) {
1242 DBG("Failed !\n");
1243 continue;
1244 }
1245 do_update_p2p_io_resource(b, found_vga);
1246 break;
1247 }
1248 }
1249 do_fixup_p2p_level(b);
1250 }
1251}
1252
1253static void
1254pcibios_fixup_p2p_bridges(void)
1255{
1256 struct pci_bus *b;
1257
1258 list_for_each_entry(b, &pci_root_buses, node)
1259 do_fixup_p2p_level(b);
1260}
1261
1262#endif /* CONFIG_PPC_PMAC */
1263
1264static int __init
1265pcibios_init(void)
1266{
1267 struct pci_controller *hose;
1268 struct pci_bus *bus;
1269 int next_busno;
1270
1271 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1272
1273 /* Scan all of the recorded PCI controllers. */
1274 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1275 if (pci_assign_all_buses)
1276 hose->first_busno = next_busno;
1277 hose->last_busno = 0xff;
1278 bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
1279 hose->ops, hose);
1280 if (bus)
1281 pci_bus_add_devices(bus);
1282 hose->last_busno = bus->subordinate;
1283 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1284 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1285 }
1286 pci_bus_count = next_busno;
1287
1288 /* OpenFirmware based machines need a map of OF bus
1289 * numbers vs. kernel bus numbers since we may have to
1290 * remap them.
1291 */
1292 if (pci_assign_all_buses && have_of)
1293 pcibios_make_OF_bus_map();
1294
1295 /* Call machine dependent fixup */
1296 if (ppc_md.pcibios_fixup)
1297 ppc_md.pcibios_fixup();
1298
1299 /* Allocate and assign resources */
1300 pcibios_allocate_bus_resources(&pci_root_buses);
1301 pcibios_allocate_resources(0);
1302 pcibios_allocate_resources(1);
1303#ifdef CONFIG_PPC_PMAC
1304 pcibios_fixup_p2p_bridges();
1305#endif /* CONFIG_PPC_PMAC */
1306 pcibios_assign_resources();
1307
1308 /* Call machine dependent post-init code */
1309 if (ppc_md.pcibios_after_init)
1310 ppc_md.pcibios_after_init();
1311
1312 return 0;
1313}
1314
1315subsys_initcall(pcibios_init);
1316
1317unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1318 unsigned long start, unsigned long size)
1319{
1320 return start;
1321}
1322
1323void __init pcibios_fixup_bus(struct pci_bus *bus)
1324{
1325 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1326 unsigned long io_offset;
1327 struct resource *res;
1328 struct pci_dev *dev;
1329 int i;
1330
1331 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1332 if (bus->parent == NULL) {
1333 /* This is a host bridge - fill in its resources */
1334 hose->bus = bus;
1335
1336 bus->resource[0] = res = &hose->io_resource;
1337 if (!res->flags) {
1338 if (io_offset)
1339 printk(KERN_ERR "I/O resource not set for host"
1340 " bridge %d\n", hose->index);
1341 res->start = 0;
1342 res->end = IO_SPACE_LIMIT;
1343 res->flags = IORESOURCE_IO;
1344 }
1345 res->start += io_offset;
1346 res->end += io_offset;
1347
1348 for (i = 0; i < 3; ++i) {
1349 res = &hose->mem_resources[i];
1350 if (!res->flags) {
1351 if (i > 0)
1352 continue;
1353 printk(KERN_ERR "Memory resource not set for "
1354 "host bridge %d\n", hose->index);
1355 res->start = hose->pci_mem_offset;
1356 res->end = ~0U;
1357 res->flags = IORESOURCE_MEM;
1358 }
1359 bus->resource[i+1] = res;
1360 }
1361 } else {
1362 /* This is a subordinate bridge */
1363 pci_read_bridge_bases(bus);
1364
1365 for (i = 0; i < 4; ++i) {
1366 if ((res = bus->resource[i]) == NULL)
1367 continue;
1368 if (!res->flags)
1369 continue;
1370 if (io_offset && (res->flags & IORESOURCE_IO)) {
1371 res->start += io_offset;
1372 res->end += io_offset;
1373 } else if (hose->pci_mem_offset
1374 && (res->flags & IORESOURCE_MEM)) {
1375 res->start += hose->pci_mem_offset;
1376 res->end += hose->pci_mem_offset;
1377 }
1378 }
1379 }
1380
1381 /* Platform specific bus fixups */
1382 if (ppc_md.pcibios_fixup_bus)
1383 ppc_md.pcibios_fixup_bus(bus);
1384
1385 /* Read default IRQs and fixup if necessary */
1386 list_for_each_entry(dev, &bus->devices, bus_list) {
1387 pci_read_irq_line(dev);
1388 if (ppc_md.pci_irq_fixup)
1389 ppc_md.pci_irq_fixup(dev);
1390 }
1391}
1392
1393char __init *pcibios_setup(char *str)
1394{
1395 return str;
1396}
1397
1398/* the next one is stolen from the alpha port... */
1399void __init
1400pcibios_update_irq(struct pci_dev *dev, int irq)
1401{
1402 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1403 /* XXX FIXME - update OF device tree node interrupt property */
1404}
1405
1406#ifdef CONFIG_PPC_MERGE
1407/* XXX This is a copy of the ppc64 version. This is temporary until we start
1408 * merging the 2 PCI layers
1409 */
1410/*
1411 * Reads the interrupt pin to determine if the interrupt is used by the card.
1412 * If it is, gets the interrupt line from Open Firmware and sets it in the
1413 * pci_dev and in the PCI_INTERRUPT_LINE config register.
1414 */
1415int pci_read_irq_line(struct pci_dev *pci_dev)
1416{
1417 struct of_irq oirq;
1418 unsigned int virq;
1419
1420 DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1421
1422 /* Try to get a mapping from the device-tree */
1423 if (of_irq_map_pci(pci_dev, &oirq)) {
1424 u8 line, pin;
1425
1426 /* If that fails, lets fallback to what is in the config
1427 * space and map that through the default controller. We
1428 * also set the type to level low since that's what PCI
1429 * interrupts are. If your platform does differently, then
1430 * either provide a proper interrupt tree or don't use this
1431 * function.
1432 */
1433 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
1434 return -1;
1435 if (pin == 0)
1436 return -1;
1437 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
1438 line == 0xff) {
1439 return -1;
1440 }
1441 DBG(" -> no map ! Using irq line %d from PCI config\n", line);
1442
1443 virq = irq_create_mapping(NULL, line);
1444 if (virq != NO_IRQ)
1445 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
1446 } else {
1447 DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
1448 oirq.size, oirq.specifier[0], oirq.controller->full_name);
1449
1450 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
1451 oirq.size);
1452 }
1453 if(virq == NO_IRQ) {
1454 DBG(" -> failed to map !\n");
1455 return -1;
1456 }
1457 pci_dev->irq = virq;
1458 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
1459
1460 return 0;
1461}
1462EXPORT_SYMBOL(pci_read_irq_line);
1463#endif /* CONFIG_PPC_MERGE */
1464
1465int pcibios_enable_device(struct pci_dev *dev, int mask)
1466{
1467 u16 cmd, old_cmd;
1468 int idx;
1469 struct resource *r;
1470
1471 if (ppc_md.pcibios_enable_device_hook)
1472 if (ppc_md.pcibios_enable_device_hook(dev, 0))
1473 return -EINVAL;
1474
1475 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1476 old_cmd = cmd;
1477 for (idx=0; idx<6; idx++) {
1478 r = &dev->resource[idx];
1479 if (r->flags & IORESOURCE_UNSET) {
1480 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1481 return -EINVAL;
1482 }
1483 if (r->flags & IORESOURCE_IO)
1484 cmd |= PCI_COMMAND_IO;
1485 if (r->flags & IORESOURCE_MEM)
1486 cmd |= PCI_COMMAND_MEMORY;
1487 }
1488 if (cmd != old_cmd) {
1489 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1490 pci_name(dev), old_cmd, cmd);
1491 pci_write_config_word(dev, PCI_COMMAND, cmd);
1492 }
1493 return 0;
1494}
1495
1496struct pci_controller*
1497pci_bus_to_hose(int bus)
1498{
1499 struct pci_controller* hose = hose_head;
1500
1501 for (; hose; hose = hose->next)
1502 if (bus >= hose->first_busno && bus <= hose->last_busno)
1503 return hose;
1504 return NULL;
1505}
1506
1507void __iomem *
1508pci_bus_io_base(unsigned int bus)
1509{
1510 struct pci_controller *hose;
1511
1512 hose = pci_bus_to_hose(bus);
1513 if (!hose)
1514 return NULL;
1515 return hose->io_base_virt;
1516}
1517
1518unsigned long
1519pci_bus_io_base_phys(unsigned int bus)
1520{
1521 struct pci_controller *hose;
1522
1523 hose = pci_bus_to_hose(bus);
1524 if (!hose)
1525 return 0;
1526 return hose->io_base_phys;
1527}
1528
1529unsigned long
1530pci_bus_mem_base_phys(unsigned int bus)
1531{
1532 struct pci_controller *hose;
1533
1534 hose = pci_bus_to_hose(bus);
1535 if (!hose)
1536 return 0;
1537 return hose->pci_mem_offset;
1538}
1539
1540unsigned long
1541pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1542{
1543 /* Hack alert again ! See comments in chrp_pci.c
1544 */
1545 struct pci_controller* hose =
1546 (struct pci_controller *)pdev->sysdata;
1547 if (hose && res->flags & IORESOURCE_MEM)
1548 return res->start - hose->pci_mem_offset;
1549 /* We may want to do something with IOs here... */
1550 return res->start;
1551}
1552
1553
1554static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1555 resource_size_t *offset,
1556 enum pci_mmap_state mmap_state)
1557{
1558 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1559 unsigned long io_offset = 0;
1560 int i, res_bit;
1561
1562 if (hose == 0)
1563 return NULL; /* should never happen */
1564
1565 /* If memory, add on the PCI bridge address offset */
1566 if (mmap_state == pci_mmap_mem) {
1567#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
1568 *offset += hose->pci_mem_offset;
1569#endif
1570 res_bit = IORESOURCE_MEM;
1571 } else {
1572 io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
1573 *offset += io_offset;
1574 res_bit = IORESOURCE_IO;
1575 }
1576
1577 /*
1578 * Check that the offset requested corresponds to one of the
1579 * resources of the device.
1580 */
1581 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1582 struct resource *rp = &dev->resource[i];
1583 int flags = rp->flags;
1584
1585 /* treat ROM as memory (should be already) */
1586 if (i == PCI_ROM_RESOURCE)
1587 flags |= IORESOURCE_MEM;
1588
1589 /* Active and same type? */
1590 if ((flags & res_bit) == 0)
1591 continue;
1592
1593 /* In the range of this resource? */
1594 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1595 continue;
1596
1597 /* found it! construct the final physical address */
1598 if (mmap_state == pci_mmap_io)
1599 *offset += hose->io_base_phys - io_offset;
1600 return rp;
1601 }
1602
1603 return NULL;
1604}
1605
1606/*
1607 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1608 * device mapping.
1609 */
1610static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1611 pgprot_t protection,
1612 enum pci_mmap_state mmap_state,
1613 int write_combine)
1614{
1615 unsigned long prot = pgprot_val(protection);
1616
1617 /* Write combine is always 0 on non-memory space mappings. On
1618 * memory space, if the user didn't pass 1, we check for a
1619 * "prefetchable" resource. This is a bit hackish, but we use
1620 * this to workaround the inability of /sysfs to provide a write
1621 * combine bit
1622 */
1623 if (mmap_state != pci_mmap_mem)
1624 write_combine = 0;
1625 else if (write_combine == 0) {
1626 if (rp->flags & IORESOURCE_PREFETCH)
1627 write_combine = 1;
1628 }
1629
1630 /* XXX would be nice to have a way to ask for write-through */
1631 prot |= _PAGE_NO_CACHE;
1632 if (write_combine)
1633 prot &= ~_PAGE_GUARDED;
1634 else
1635 prot |= _PAGE_GUARDED;
1636
1637 return __pgprot(prot);
1638}
1639
1640/*
1641 * This one is used by /dev/mem and fbdev who have no clue about the
1642 * PCI device, it tries to find the PCI device first and calls the
1643 * above routine
1644 */
1645pgprot_t pci_phys_mem_access_prot(struct file *file,
1646 unsigned long pfn,
1647 unsigned long size,
1648 pgprot_t protection)
1649{
1650 struct pci_dev *pdev = NULL;
1651 struct resource *found = NULL;
1652 unsigned long prot = pgprot_val(protection);
1653 unsigned long offset = pfn << PAGE_SHIFT;
1654 int i;
1655
1656 if (page_is_ram(pfn))
1657 return prot;
1658
1659 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1660
1661 for_each_pci_dev(pdev) {
1662 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1663 struct resource *rp = &pdev->resource[i];
1664 int flags = rp->flags;
1665
1666 /* Active and same type? */
1667 if ((flags & IORESOURCE_MEM) == 0)
1668 continue;
1669 /* In the range of this resource? */
1670 if (offset < (rp->start & PAGE_MASK) ||
1671 offset > rp->end)
1672 continue;
1673 found = rp;
1674 break;
1675 }
1676 if (found)
1677 break;
1678 }
1679 if (found) {
1680 if (found->flags & IORESOURCE_PREFETCH)
1681 prot &= ~_PAGE_GUARDED;
1682 pci_dev_put(pdev);
1683 }
1684
1685 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1686
1687 return __pgprot(prot);
1688}
1689
1690
1691/*
1692 * Perform the actual remap of the pages for a PCI device mapping, as
1693 * appropriate for this architecture. The region in the process to map
1694 * is described by vm_start and vm_end members of VMA, the base physical
1695 * address is found in vm_pgoff.
1696 * The pci device structure is provided so that architectures may make mapping
1697 * decisions on a per-device or per-bus basis.
1698 *
1699 * Returns a negative error code on failure, zero on success.
1700 */
1701int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1702 enum pci_mmap_state mmap_state,
1703 int write_combine)
1704{
1705 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
1706 struct resource *rp;
1707 int ret;
1708
1709 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1710 if (rp == NULL)
1711 return -EINVAL;
1712
1713 vma->vm_pgoff = offset >> PAGE_SHIFT;
1714 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1715 vma->vm_page_prot,
1716 mmap_state, write_combine);
1717
1718 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1719 vma->vm_end - vma->vm_start, vma->vm_page_prot);
1720
1721 return ret;
1722}
1723
1724/* Obsolete functions. Should be removed once the symbios driver
1725 * is fixed
1726 */
1727unsigned long
1728phys_to_bus(unsigned long pa)
1729{
1730 struct pci_controller *hose;
1731 int i;
1732
1733 for (hose = hose_head; hose; hose = hose->next) {
1734 for (i = 0; i < 3; ++i) {
1735 if (pa >= hose->mem_resources[i].start
1736 && pa <= hose->mem_resources[i].end) {
1737 /*
1738 * XXX the hose->pci_mem_offset really
1739 * only applies to mem_resources[0].
1740 * We need a way to store an offset for
1741 * the others. -- paulus
1742 */
1743 if (i == 0)
1744 pa -= hose->pci_mem_offset;
1745 return pa;
1746 }
1747 }
1748 }
1749 /* hmmm, didn't find it */
1750 return 0;
1751}
1752
1753unsigned long
1754pci_phys_to_bus(unsigned long pa, int busnr)
1755{
1756 struct pci_controller* hose = pci_bus_to_hose(busnr);
1757 if (!hose)
1758 return pa;
1759 return pa - hose->pci_mem_offset;
1760}
1761
1762unsigned long
1763pci_bus_to_phys(unsigned int ba, int busnr)
1764{
1765 struct pci_controller* hose = pci_bus_to_hose(busnr);
1766 if (!hose)
1767 return ba;
1768 return ba + hose->pci_mem_offset;
1769}
1770
1771/* Provide information on locations of various I/O regions in physical
1772 * memory. Do this on a per-card basis so that we choose the right
1773 * root bridge.
1774 * Note that the returned IO or memory base is a physical address
1775 */
1776
1777long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1778{
1779 struct pci_controller* hose;
1780 long result = -EOPNOTSUPP;
1781
1782 /* Argh ! Please forgive me for that hack, but that's the
1783 * simplest way to get existing XFree to not lockup on some
1784 * G5 machines... So when something asks for bus 0 io base
1785 * (bus 0 is HT root), we return the AGP one instead.
1786 */
1787#ifdef CONFIG_PPC_PMAC
1788 if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1789 if (bus == 0)
1790 bus = 0xf0;
1791#endif /* CONFIG_PPC_PMAC */
1792
1793 hose = pci_bus_to_hose(bus);
1794 if (!hose)
1795 return -ENODEV;
1796
1797 switch (which) {
1798 case IOBASE_BRIDGE_NUMBER:
1799 return (long)hose->first_busno;
1800 case IOBASE_MEMORY:
1801 return (long)hose->pci_mem_offset;
1802 case IOBASE_IO:
1803 return (long)hose->io_base_phys;
1804 case IOBASE_ISA_IO:
1805 return (long)isa_io_base;
1806 case IOBASE_ISA_MEM:
1807 return (long)isa_mem_base;
1808 }
1809
1810 return result;
1811}
1812
1813void pci_resource_to_user(const struct pci_dev *dev, int bar,
1814 const struct resource *rsrc,
1815 resource_size_t *start, resource_size_t *end)
1816{
1817 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1818 resource_size_t offset = 0;
1819
1820 if (hose == NULL)
1821 return;
1822
1823 if (rsrc->flags & IORESOURCE_IO)
1824 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1825
1826 /* We pass a fully fixed up address to userland for MMIO instead of
1827 * a BAR value because X is lame and expects to be able to use that
1828 * to pass to /dev/mem !
1829 *
1830 * That means that we'll have potentially 64 bits values where some
1831 * userland apps only expect 32 (like X itself since it thinks only
1832 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
1833 * 32 bits CHRPs :-(
1834 *
1835 * Hopefully, the sysfs interface is immune to that gunk. Once X
1836 * has been fixed (and the fix spread enough), we can re-enable the
1837 * 2 lines below and pass down a BAR value to userland. In that case
1838 * we'll also have to re-enable the matching code in
1839 * __pci_mmap_make_offset().
1840 *
1841 * BenH.
1842 */
1843#if 0
1844 else if (rsrc->flags & IORESOURCE_MEM)
1845 offset = hose->pci_mem_offset;
1846#endif
1847
1848 *start = rsrc->start - offset;
1849 *end = rsrc->end - offset;
1850}
1851
1852void __init pci_init_resource(struct resource *res, resource_size_t start,
1853 resource_size_t end, int flags, char *name)
1854{
1855 res->start = start;
1856 res->end = end;
1857 res->flags = flags;
1858 res->name = name;
1859 res->parent = NULL;
1860 res->sibling = NULL;
1861 res->child = NULL;
1862}
1863
1864unsigned long pci_address_to_pio(phys_addr_t address)
1865{
1866 struct pci_controller* hose = hose_head;
1867
1868 for (; hose; hose = hose->next) {
1869 unsigned int size = hose->io_resource.end -
1870 hose->io_resource.start + 1;
1871 if (address >= hose->io_base_phys &&
1872 address < (hose->io_base_phys + size)) {
1873 unsigned long base =
1874 (unsigned long)hose->io_base_virt - _IO_BASE;
1875 return base + (address - hose->io_base_phys);
1876 }
1877 }
1878 return (unsigned int)-1;
1879}
1880EXPORT_SYMBOL(pci_address_to_pio);
1881
1882/*
1883 * Null PCI config access functions, for the case when we can't
1884 * find a hose.
1885 */
1886#define NULL_PCI_OP(rw, size, type) \
1887static int \
1888null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1889{ \
1890 return PCIBIOS_DEVICE_NOT_FOUND; \
1891}
1892
1893static int
1894null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1895 int len, u32 *val)
1896{
1897 return PCIBIOS_DEVICE_NOT_FOUND;
1898}
1899
1900static int
1901null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1902 int len, u32 val)
1903{
1904 return PCIBIOS_DEVICE_NOT_FOUND;
1905}
1906
1907static struct pci_ops null_pci_ops =
1908{
1909 null_read_config,
1910 null_write_config
1911};
1912
1913/*
1914 * These functions are used early on before PCI scanning is done
1915 * and all of the pci_dev and pci_bus structures have been created.
1916 */
1917static struct pci_bus *
1918fake_pci_bus(struct pci_controller *hose, int busnr)
1919{
1920 static struct pci_bus bus;
1921
1922 if (hose == 0) {
1923 hose = pci_bus_to_hose(busnr);
1924 if (hose == 0)
1925 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1926 }
1927 bus.number = busnr;
1928 bus.sysdata = hose;
1929 bus.ops = hose? hose->ops: &null_pci_ops;
1930 return &bus;
1931}
1932
1933#define EARLY_PCI_OP(rw, size, type) \
1934int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1935 int devfn, int offset, type value) \
1936{ \
1937 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1938 devfn, offset, value); \
1939}
1940
1941EARLY_PCI_OP(read, byte, u8 *)
1942EARLY_PCI_OP(read, word, u16 *)
1943EARLY_PCI_OP(read, dword, u32 *)
1944EARLY_PCI_OP(write, byte, u8)
1945EARLY_PCI_OP(write, word, u16)
1946EARLY_PCI_OP(write, dword, u32)