arch/powerpc/kernel/pci_32.c
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14
15#include <asm/processor.h>
16#include <asm/io.h>
17#include <asm/prom.h>
18#include <asm/sections.h>
19#include <asm/pci-bridge.h>
20#include <asm/byteorder.h>
21#include <asm/irq.h>
22#include <asm/uaccess.h>
23#include <asm/machdep.h>
24
25#undef DEBUG
26
27#ifdef DEBUG
28#define DBG(x...) printk(x)
29#else
30#define DBG(x...)
31#endif
32
33unsigned long isa_io_base = 0;
34unsigned long isa_mem_base = 0;
35unsigned long pci_dram_offset = 0;
36int pcibios_assign_bus_offset = 1;
37
38void pcibios_make_OF_bus_map(void);
39
40static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
41static int probe_resource(struct pci_bus *parent, struct resource *pr,
42 struct resource *res, struct resource **conflict);
43static void update_bridge_base(struct pci_bus *bus, int i);
44static void pcibios_fixup_resources(struct pci_dev* dev);
45static void fixup_broken_pcnet32(struct pci_dev* dev);
46static int reparent_resources(struct resource *parent, struct resource *res);
47static void fixup_cpc710_pci64(struct pci_dev* dev);
48#ifdef CONFIG_PPC_OF
49static u8* pci_to_OF_bus_map;
50#endif
51
52/* By default, we don't re-assign bus numbers. We do this only on
53 * some pmacs
54 */
55int pci_assign_all_buses;
56
57struct pci_controller* hose_head;
58struct pci_controller** hose_tail = &hose_head;
59
60static int pci_bus_count;
61
62static void
63fixup_broken_pcnet32(struct pci_dev* dev)
64{
65 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
66 dev->vendor = PCI_VENDOR_ID_AMD;
67 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
68 }
69}
70DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
71
72static void
73fixup_cpc710_pci64(struct pci_dev* dev)
74{
75 /* Hide the PCI64 BARs from the kernel as their content doesn't
76 * fit well in the resource management
77 */
78 dev->resource[0].start = dev->resource[0].end = 0;
79 dev->resource[0].flags = 0;
80 dev->resource[1].start = dev->resource[1].end = 0;
81 dev->resource[1].flags = 0;
82}
83DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
84
85static void
86pcibios_fixup_resources(struct pci_dev *dev)
87{
88 struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
89 int i;
90 unsigned long offset;
91
92 if (!hose) {
93 printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
94 return;
95 }
96 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
97 struct resource *res = dev->resource + i;
98 if (!res->flags)
99 continue;
100 if (res->end == 0xffffffff) {
101 DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
102 pci_name(dev), i, res->start, res->end);
103 res->end -= res->start;
104 res->start = 0;
105 res->flags |= IORESOURCE_UNSET;
106 continue;
107 }
108 offset = 0;
109 if (res->flags & IORESOURCE_MEM) {
110 offset = hose->pci_mem_offset;
111 } else if (res->flags & IORESOURCE_IO) {
112 offset = (unsigned long) hose->io_base_virt
113 - isa_io_base;
114 }
115 if (offset != 0) {
116 res->start += offset;
117 res->end += offset;
118#ifdef DEBUG
119 printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
120 i, res->flags, pci_name(dev),
121 res->start - offset, res->start);
122#endif
123 }
124 }
125
126 /* Call machine specific resource fixup */
127 if (ppc_md.pcibios_fixup_resources)
128 ppc_md.pcibios_fixup_resources(dev);
129}
130DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
131
132void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
133 struct resource *res)
134{
135 unsigned long offset = 0;
136 struct pci_controller *hose = dev->sysdata;
137
138 if (hose && res->flags & IORESOURCE_IO)
139 offset = (unsigned long)hose->io_base_virt - isa_io_base;
140 else if (hose && res->flags & IORESOURCE_MEM)
141 offset = hose->pci_mem_offset;
142 region->start = res->start - offset;
143 region->end = res->end - offset;
144}
145EXPORT_SYMBOL(pcibios_resource_to_bus);
146
147void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
148 struct pci_bus_region *region)
149{
150 unsigned long offset = 0;
151 struct pci_controller *hose = dev->sysdata;
152
153 if (hose && res->flags & IORESOURCE_IO)
154 offset = (unsigned long)hose->io_base_virt - isa_io_base;
155 else if (hose && res->flags & IORESOURCE_MEM)
156 offset = hose->pci_mem_offset;
157 res->start = region->start + offset;
158 res->end = region->end + offset;
159}
160EXPORT_SYMBOL(pcibios_bus_to_resource);
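/*
 * Worked example of the two helpers above (illustrative numbers, not
 * from real hardware): with hose->pci_mem_offset == 0x80000000, a
 * memory BAR at CPU physical 0xc0000000 appears on the PCI bus at
 * 0x40000000.  pcibios_resource_to_bus() subtracts the offset
 * (0xc0000000 - 0x80000000), pcibios_bus_to_resource() adds it back.
 */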
161
162/*
163 * We need to avoid collisions with `mirrored' VGA ports
164 * and other strange ISA hardware, so we always want the
165 * addresses to be allocated in the 0x000-0x0ff region
166 * modulo 0x400.
167 *
168 * Why? Because some silly external IO cards only decode
169 * the low 10 bits of the IO address. The 0x00-0xff region
170 * is reserved for motherboard devices that decode all 16
171 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
172 * but we want to try to avoid allocating at 0x2900-0x2bff
173 * which might have been mirrored at 0x0100-0x03ff..
174 */
175void pcibios_align_resource(void *data, struct resource *res,
176 resource_size_t size, resource_size_t align)
177{
178 struct pci_dev *dev = data;
179
180 if (res->flags & IORESOURCE_IO) {
181 resource_size_t start = res->start;
182
183 if (size > 0x100) {
184 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
185 " (%lld bytes)\n", pci_name(dev),
186 dev->resource - res, (unsigned long long)size);
187 }
188
189 if (start & 0x300) {
190 start = (start + 0x3ff) & ~0x3ff;
191 res->start = start;
192 }
193 }
194}
195EXPORT_SYMBOL(pcibios_align_resource);
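/*
 * Example of the alignment rule above: an I/O request starting at
 * 0x2910 has bits in 0x300 set, so it is bumped to
 * (0x2910 + 0x3ff) & ~0x3ff = 0x2c00, which falls back into the
 * "safe" 0x000-0x0ff region modulo 0x400.
 */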
196
197/*
198 * Handle resources of PCI devices. If the world were perfect, we could
199 * just allocate all the resource regions and do nothing more. It isn't.
200 * On the other hand, we cannot just re-allocate all devices, as it would
201 * require us to know lots of host bridge internals. So we attempt to
202 * keep as much of the original configuration as possible, but tweak it
203 * when it's found to be wrong.
204 *
205 * Known BIOS problems we have to work around:
206 * - I/O or memory regions not configured
207 * - regions configured, but not enabled in the command register
208 * - bogus I/O addresses above 64K used
209 * - expansion ROMs left enabled (this may sound harmless, but given
210 * the fact the PCI specs explicitly allow address decoders to be
211 * shared between expansion ROMs and other resource regions, it's
212 * at least dangerous)
213 *
214 * Our solution:
215 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
216 * This gives us fixed barriers on where we can allocate.
217 * (2) Allocate resources for all enabled devices. If there is
218 * a collision, just mark the resource as unallocated. Also
219 * disable expansion ROMs during this step.
220 * (3) Try to allocate resources for disabled devices. If the
221 * resources were assigned correctly, everything goes well,
222 * if they weren't, they won't disturb allocation of other
223 * resources.
224 * (4) Assign new addresses to resources which were either
225 * not configured at all or misconfigured. If explicitly
226 * requested by the user, configure expansion ROM address
227 * as well.
228 */
229
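/*
 * In this file the four steps above correspond to the sequence of
 * calls made from pcibios_init() below:
 *	pcibios_allocate_bus_resources(&pci_root_buses);	(step 1)
 *	pcibios_allocate_resources(0);				(step 2)
 *	pcibios_allocate_resources(1);				(step 3)
 *	pcibios_assign_resources();				(step 4)
 */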
230static void __init
231pcibios_allocate_bus_resources(struct list_head *bus_list)
232{
233 struct pci_bus *bus;
234 int i;
235 struct resource *res, *pr;
236
237 /* Depth-First Search on bus tree */
238 list_for_each_entry(bus, bus_list, node) {
239 for (i = 0; i < 4; ++i) {
240 if ((res = bus->resource[i]) == NULL || !res->flags
241 || res->start > res->end)
242 continue;
243 if (bus->parent == NULL)
244 pr = (res->flags & IORESOURCE_IO)?
245 &ioport_resource: &iomem_resource;
246 else {
247 pr = pci_find_parent_resource(bus->self, res);
248 if (pr == res) {
249 /* this happens when the generic PCI
250 * code (wrongly) decides that this
251 * bridge is transparent -- paulus
252 */
253 continue;
254 }
255 }
256
257 DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
258 res->start, res->end, res->flags, pr);
259 if (pr) {
260 if (request_resource(pr, res) == 0)
261 continue;
262 /*
263 * Must be a conflict with an existing entry.
264 * Move that entry (or entries) under the
265 * bridge resource and try again.
266 */
267 if (reparent_resources(pr, res) == 0)
268 continue;
269 }
270 printk(KERN_ERR "PCI: Cannot allocate resource region "
271 "%d of PCI bridge %d\n", i, bus->number);
272 if (pci_relocate_bridge_resource(bus, i))
273 bus->resource[i] = NULL;
274 }
275 pcibios_allocate_bus_resources(&bus->children);
276 }
277}
278
279/*
280 * Reparent resource children of pr that conflict with res
281 * under res, and make res replace those children.
282 */
283static int __init
284reparent_resources(struct resource *parent, struct resource *res)
285{
286 struct resource *p, **pp;
287 struct resource **firstpp = NULL;
288
289 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
290 if (p->end < res->start)
291 continue;
292 if (res->end < p->start)
293 break;
294 if (p->start < res->start || p->end > res->end)
295 return -1; /* not completely contained */
296 if (firstpp == NULL)
297 firstpp = pp;
298 }
299 if (firstpp == NULL)
300 return -1; /* didn't find any conflicting entries? */
301 res->parent = parent;
302 res->child = *firstpp;
303 res->sibling = *pp;
304 *firstpp = res;
305 *pp = NULL;
306 for (p = res->child; p != NULL; p = p->sibling) {
307 p->parent = res;
308 DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
309 p->name, p->start, p->end, res->name);
310 }
311 return 0;
312}
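/*
 * Example of the list surgery above (hypothetical ranges): if the
 * parent's children are A [0x1000-0x1fff], B [0x2000-0x2fff] and
 * C [0x8000-0x8fff], and the new bridge window res is [0x1000-0x3fff],
 * then A and B become children of res, res takes their place in the
 * parent's child list, and C remains a sibling of res:
 * parent -> res -> C, with res->child == A and A->sibling == B.
 */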
313
314/*
315 * A bridge has been allocated a range which is outside the range
316 * of its parent bridge, so it needs to be moved.
317 */
318static int __init
319pci_relocate_bridge_resource(struct pci_bus *bus, int i)
320{
321 struct resource *res, *pr, *conflict;
322 unsigned long try, size;
323 int j;
324 struct pci_bus *parent = bus->parent;
325
326 if (parent == NULL) {
327 /* shouldn't ever happen */
328 printk(KERN_ERR "PCI: can't move host bridge resource\n");
329 return -1;
330 }
331 res = bus->resource[i];
332 if (res == NULL)
333 return -1;
334 pr = NULL;
335 for (j = 0; j < 4; j++) {
336 struct resource *r = parent->resource[j];
337 if (!r)
338 continue;
339 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
340 continue;
341 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
342 pr = r;
343 break;
344 }
345 if (res->flags & IORESOURCE_PREFETCH)
346 pr = r;
347 }
348 if (pr == NULL)
349 return -1;
350 size = res->end - res->start;
351 if (pr->start > pr->end || size > pr->end - pr->start)
352 return -1;
353 try = pr->end;
354 for (;;) {
355 res->start = try - size;
356 res->end = try;
357 if (probe_resource(bus->parent, pr, res, &conflict) == 0)
358 break;
359 if (conflict->start <= pr->start + size)
360 return -1;
361 try = conflict->start - 1;
362 }
363 if (request_resource(pr, res)) {
364 DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
365 res->start, res->end);
366 return -1; /* "can't happen" */
367 }
368 update_bridge_base(bus, i);
369 printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
370 bus->number, i, (unsigned long long)res->start,
371 (unsigned long long)res->end);
372 return 0;
373}
374
375static int __init
376probe_resource(struct pci_bus *parent, struct resource *pr,
377 struct resource *res, struct resource **conflict)
378{
379 struct pci_bus *bus;
380 struct pci_dev *dev;
381 struct resource *r;
382 int i;
383
384 for (r = pr->child; r != NULL; r = r->sibling) {
385 if (r->end >= res->start && res->end >= r->start) {
386 *conflict = r;
387 return 1;
388 }
389 }
390 list_for_each_entry(bus, &parent->children, node) {
391 for (i = 0; i < 4; ++i) {
392 if ((r = bus->resource[i]) == NULL)
393 continue;
394 if (!r->flags || r->start > r->end || r == res)
395 continue;
396 if (pci_find_parent_resource(bus->self, r) != pr)
397 continue;
398 if (r->end >= res->start && res->end >= r->start) {
399 *conflict = r;
400 return 1;
401 }
402 }
403 }
404 list_for_each_entry(dev, &parent->devices, bus_list) {
405 for (i = 0; i < 6; ++i) {
406 r = &dev->resource[i];
407 if (!r->flags || (r->flags & IORESOURCE_UNSET))
408 continue;
409 if (pci_find_parent_resource(dev, r) != pr)
410 continue;
411 if (r->end >= res->start && res->end >= r->start) {
412 *conflict = r;
413 return 1;
414 }
415 }
416 }
417 return 0;
418}
419
420static void __init
421update_bridge_base(struct pci_bus *bus, int i)
422{
423 struct resource *res = bus->resource[i];
424 u8 io_base_lo, io_limit_lo;
425 u16 mem_base, mem_limit;
426 u16 cmd;
427 unsigned long start, end, off;
428 struct pci_dev *dev = bus->self;
429 struct pci_controller *hose = dev->sysdata;
430
431 if (!hose) {
432 printk("update_bridge_base: no hose?\n");
433 return;
434 }
435 pci_read_config_word(dev, PCI_COMMAND, &cmd);
436 pci_write_config_word(dev, PCI_COMMAND,
437 cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
438 if (res->flags & IORESOURCE_IO) {
439 off = (unsigned long) hose->io_base_virt - isa_io_base;
440 start = res->start - off;
441 end = res->end - off;
442 io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
443 io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
444 if (end > 0xffff) {
445 pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
446 start >> 16);
447 pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
448 end >> 16);
449 io_base_lo |= PCI_IO_RANGE_TYPE_32;
450 } else
451 io_base_lo |= PCI_IO_RANGE_TYPE_16;
452 pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
453 pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
454
455 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
456 == IORESOURCE_MEM) {
457 off = hose->pci_mem_offset;
458 mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
459 mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
460 pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
461 pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);
462
463 } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
464 == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
465 off = hose->pci_mem_offset;
466 mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
467 mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
468 pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
469 pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
470
471 } else {
472 DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
473 pci_name(dev), i, res->flags);
474 }
475 pci_write_config_word(dev, PCI_COMMAND, cmd);
476}
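/*
 * Encoding example for the window programming above (illustrative,
 * assuming the hose offsets are zero): an I/O window 0x1000-0x1fff
 * yields io_base_lo == io_limit_lo == 0x10 (address bits 15:12 in
 * register bits 7:4); a non-prefetchable memory window
 * 0x80000000-0x9fffffff yields mem_base == 0x8000 and
 * mem_limit == 0x9ff0 (address bits 31:20 in register bits 15:4).
 */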
477
478static inline void alloc_resource(struct pci_dev *dev, int idx)
479{
480 struct resource *pr, *r = &dev->resource[idx];
481
482 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
483 pci_name(dev), idx, r->start, r->end, r->flags);
484 pr = pci_find_parent_resource(dev, r);
485 if (!pr || request_resource(pr, r) < 0) {
486 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
487 " of device %s\n", idx, pci_name(dev));
488 if (pr)
489 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
490 pr, pr->start, pr->end, pr->flags);
491 /* We'll assign a new address later */
492 r->flags |= IORESOURCE_UNSET;
493 r->end -= r->start;
494 r->start = 0;
495 }
496}
497
498static void __init
499pcibios_allocate_resources(int pass)
500{
501 struct pci_dev *dev = NULL;
502 int idx, disabled;
503 u16 command;
504 struct resource *r;
505
506 for_each_pci_dev(dev) {
507 pci_read_config_word(dev, PCI_COMMAND, &command);
508 for (idx = 0; idx < 6; idx++) {
509 r = &dev->resource[idx];
510 if (r->parent) /* Already allocated */
511 continue;
512 if (!r->flags || (r->flags & IORESOURCE_UNSET))
513 continue; /* Not assigned at all */
514 if (r->flags & IORESOURCE_IO)
515 disabled = !(command & PCI_COMMAND_IO);
516 else
517 disabled = !(command & PCI_COMMAND_MEMORY);
518 if (pass == disabled)
519 alloc_resource(dev, idx);
520 }
521 if (pass)
522 continue;
523 r = &dev->resource[PCI_ROM_RESOURCE];
524 if (r->flags & IORESOURCE_ROM_ENABLE) {
525 /* Turn the ROM off, leave the resource region, but keep it unregistered. */
526 u32 reg;
527 DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
528 r->flags &= ~IORESOURCE_ROM_ENABLE;
529 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
530 pci_write_config_dword(dev, dev->rom_base_reg,
531 reg & ~PCI_ROM_ADDRESS_ENABLE);
532 }
533 }
534}
535
536static void __init
537pcibios_assign_resources(void)
538{
539 struct pci_dev *dev = NULL;
540 int idx;
541 struct resource *r;
542
543 for_each_pci_dev(dev) {
544 int class = dev->class >> 8;
545
546 /* Don't touch classless devices and host bridges */
547 if (!class || class == PCI_CLASS_BRIDGE_HOST)
548 continue;
549
550 for (idx = 0; idx < 6; idx++) {
551 r = &dev->resource[idx];
552
553 /*
554 * We shall assign a new address to this resource,
555 * either because the BIOS (sic) forgot to do so
556 * or because we have decided the old address was
557 * unusable for some reason.
558 */
559 if ((r->flags & IORESOURCE_UNSET) && r->end &&
560 (!ppc_md.pcibios_enable_device_hook ||
561 !ppc_md.pcibios_enable_device_hook(dev, 1))) {
562 r->flags &= ~IORESOURCE_UNSET;
563 pci_assign_resource(dev, idx);
564 }
565 }
566
567#if 0 /* don't assign ROMs */
568 r = &dev->resource[PCI_ROM_RESOURCE];
569 r->end -= r->start;
570 r->start = 0;
571 if (r->end)
572 pci_assign_resource(dev, PCI_ROM_RESOURCE);
573#endif
574 }
575}
576
577
578int
579pcibios_enable_resources(struct pci_dev *dev, int mask)
580{
581 u16 cmd, old_cmd;
582 int idx;
583 struct resource *r;
584
585 pci_read_config_word(dev, PCI_COMMAND, &cmd);
586 old_cmd = cmd;
587 for (idx=0; idx<6; idx++) {
588 /* Only set up the requested stuff */
589 if (!(mask & (1<<idx)))
590 continue;
591
592 r = &dev->resource[idx];
593 if (r->flags & IORESOURCE_UNSET) {
594 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
595 return -EINVAL;
596 }
597 if (r->flags & IORESOURCE_IO)
598 cmd |= PCI_COMMAND_IO;
599 if (r->flags & IORESOURCE_MEM)
600 cmd |= PCI_COMMAND_MEMORY;
601 }
602 if (dev->resource[PCI_ROM_RESOURCE].start)
603 cmd |= PCI_COMMAND_MEMORY;
604 if (cmd != old_cmd) {
605 printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
606 pci_write_config_word(dev, PCI_COMMAND, cmd);
607 }
608 return 0;
609}
610
611static int next_controller_index;
612
613struct pci_controller * __init
614pcibios_alloc_controller(void)
615{
616 struct pci_controller *hose;
617
618 hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
619 memset(hose, 0, sizeof(struct pci_controller));
620
621 *hose_tail = hose;
622 hose_tail = &hose->next;
623
624 hose->index = next_controller_index++;
625
626 return hose;
627}
628
629#ifdef CONFIG_PPC_OF
630/*
631 * Functions below are used on OpenFirmware machines.
632 */
633static void
634make_one_node_map(struct device_node* node, u8 pci_bus)
635{
636 int *bus_range;
637 int len;
638
639 if (pci_bus >= pci_bus_count)
640 return;
641 bus_range = (int *) get_property(node, "bus-range", &len);
642 if (bus_range == NULL || len < 2 * sizeof(int)) {
643 printk(KERN_WARNING "Can't get bus-range for %s, "
644 "assuming it starts at 0\n", node->full_name);
645 pci_to_OF_bus_map[pci_bus] = 0;
646 } else
647 pci_to_OF_bus_map[pci_bus] = bus_range[0];
648
649 for (node=node->child; node != 0;node = node->sibling) {
650 struct pci_dev* dev;
651 unsigned int *class_code, *reg;
652
653 class_code = (unsigned int *) get_property(node, "class-code", NULL);
654 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
655 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
656 continue;
657 reg = (unsigned int *)get_property(node, "reg", NULL);
658 if (!reg)
659 continue;
660 dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
661 if (!dev || !dev->subordinate)
662 continue;
663 make_one_node_map(node, dev->subordinate->number);
664 }
665}
666
667void
668pcibios_make_OF_bus_map(void)
669{
670 int i;
671 struct pci_controller* hose;
672 u8* of_prop_map;
673
674 pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
675 if (!pci_to_OF_bus_map) {
676 printk(KERN_ERR "Can't allocate OF bus map !\n");
677 return;
678 }
679
680 /* We fill the bus map with invalid values, that helps
681 * debugging.
682 */
683 for (i=0; i<pci_bus_count; i++)
684 pci_to_OF_bus_map[i] = 0xff;
685
686 /* For each hose, we begin searching bridges */
687 for(hose=hose_head; hose; hose=hose->next) {
688 struct device_node* node;
689 node = (struct device_node *)hose->arch_data;
690 if (!node)
691 continue;
692 make_one_node_map(node, hose->first_busno);
693 }
694 of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
695 if (of_prop_map)
696 memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
697#ifdef DEBUG
698 printk("PCI->OF bus map:\n");
699 for (i=0; i<pci_bus_count; i++) {
700 if (pci_to_OF_bus_map[i] == 0xff)
701 continue;
702 printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
703 }
704#endif
705}
706
707typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
708
709static struct device_node*
710scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
711{
712 struct device_node* sub_node;
713
714 for (; node != 0;node = node->sibling) {
715 unsigned int *class_code;
716
717 if (filter(node, data))
718 return node;
719
720 /* For PCI<->PCI bridges or CardBus bridges, we go down
721 * Note: some OFs create a parent node "multifunc-device" as
722 * a fake root for all functions of a multi-function device,
723 * we go down them as well.
724 */
725 class_code = (unsigned int *) get_property(node, "class-code", NULL);
726 if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
727 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
728 strcmp(node->name, "multifunc-device"))
729 continue;
730 sub_node = scan_OF_pci_childs(node->child, filter, data);
731 if (sub_node)
732 return sub_node;
733 }
734 return NULL;
735}
736
737static int
738scan_OF_pci_childs_iterator(struct device_node* node, void* data)
739{
740 unsigned int *reg;
741 u8* fdata = (u8*)data;
742
743 reg = (unsigned int *) get_property(node, "reg", NULL);
744 if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
745 && ((reg[0] >> 16) & 0xff) == fdata[0])
746 return 1;
747 return 0;
748}
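/*
 * The first "reg" cell of an OF PCI node encodes the config-space
 * address: bits 23:16 are the bus number, bits 15:8 the devfn
 * (device in 15:11, function in 10:8), hence the shifts used in the
 * iterator above.  A device at 01:05.2, for instance, would have
 * reg[0] == 0x00012a00 (bus 0x01, devfn 0x2a).
 */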
749
750static struct device_node*
751scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
752{
753 u8 filter_data[2] = {bus, dev_fn};
754
755 return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
756}
757
758/*
759 * Scans the OF tree for a device node matching a PCI device
760 */
761struct device_node *
762pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
763{
764 struct pci_controller *hose;
765 struct device_node *node;
766 int busnr;
767
768 if (!have_of)
769 return NULL;
770
771 /* Lookup the hose */
772 busnr = bus->number;
773 hose = pci_bus_to_hose(busnr);
774 if (!hose)
775 return NULL;
776
777 /* Check it has an OF node associated */
778 node = (struct device_node *) hose->arch_data;
779 if (!node)
780 return NULL;
781
782 /* Fixup bus number according to what OF think it is. */
783#ifdef CONFIG_PPC_PMAC
784 /* The G5 needs a special case here. Basically, we don't remap all
785 * busses on it so we don't create the pci-OF-map. However, we do
786 * remap the AGP bus and so have to deal with it. A future better
787 * fix has to be done by making the remapping per-host and always
788 * filling the pci_to_OF map. --BenH
789 */
790 if (machine_is(powermac) && busnr >= 0xf0)
791 busnr -= 0xf0;
792 else
793#endif
794 if (pci_to_OF_bus_map)
795 busnr = pci_to_OF_bus_map[busnr];
796 if (busnr == 0xff)
797 return NULL;
798
799 /* Now, look up the children of the hose */
800 return scan_OF_childs_for_device(node->child, busnr, devfn);
801}
802EXPORT_SYMBOL(pci_busdev_to_OF_node);
803
804struct device_node*
805pci_device_to_OF_node(struct pci_dev *dev)
806{
807 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
808}
809EXPORT_SYMBOL(pci_device_to_OF_node);
810
811/* This routine is meant to be used early during boot, when the
812 * PCI bus numbers have not yet been assigned, and you need to
813 * issue PCI config cycles to an OF device.
814 * It could also be used to "fix" RTAS config cycles if you want
815 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
816 * config cycles.
817 */
818struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
819{
820 if (!have_of)
821 return NULL;
822 while(node) {
823 struct pci_controller* hose;
824 for (hose=hose_head;hose;hose=hose->next)
825 if (hose->arch_data == node)
826 return hose;
827 node=node->parent;
828 }
829 return NULL;
830}
831
832static int
833find_OF_pci_device_filter(struct device_node* node, void* data)
834{
835 return ((void *)node == data);
836}
837
838/*
839 * Returns the PCI device matching a given OF node
840 */
841int
842pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
843{
844 unsigned int *reg;
845 struct pci_controller* hose;
846 struct pci_dev* dev = NULL;
847
848 if (!have_of)
849 return -ENODEV;
850 /* Make sure it's really a PCI device */
851 hose = pci_find_hose_for_OF_device(node);
852 if (!hose || !hose->arch_data)
853 return -ENODEV;
854 if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
855 find_OF_pci_device_filter, (void *)node))
856 return -ENODEV;
857 reg = (unsigned int *) get_property(node, "reg", NULL);
858 if (!reg)
859 return -ENODEV;
860 *bus = (reg[0] >> 16) & 0xff;
861 *devfn = ((reg[0] >> 8) & 0xff);
862
863 /* Ok, here we need some tweak. If we have already renumbered
864 * all busses, we can't rely on the OF bus number any more.
865 * the pci_to_OF_bus_map is not enough as several PCI busses
866 * may match the same OF bus number.
867 */
868 if (!pci_to_OF_bus_map)
869 return 0;
870
871 for_each_pci_dev(dev)
872 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
873 dev->devfn == *devfn) {
874 *bus = dev->bus->number;
875 pci_dev_put(dev);
876 return 0;
877 }
878
879 return -ENODEV;
880}
881EXPORT_SYMBOL(pci_device_from_OF_node);
882
883void __init
884pci_process_bridge_OF_ranges(struct pci_controller *hose,
885 struct device_node *dev, int primary)
886{
887 static unsigned int static_lc_ranges[256] __initdata;
888 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
889 unsigned int size;
890 int rlen = 0, orig_rlen;
891 int memno = 0;
892 struct resource *res;
893 int np, na = prom_n_addr_cells(dev);
894 np = na + 5;
895
896 /* First we try to merge ranges to fix a problem with some pmacs
897 * that can have more than 3 ranges, fortunately using contiguous
898 * addresses -- BenH
899 */
900 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
901 if (!dt_ranges)
902 return;
903 /* Sanity check, though hopefully that never happens */
904 if (rlen > sizeof(static_lc_ranges)) {
905 printk(KERN_WARNING "OF ranges property too large !\n");
906 rlen = sizeof(static_lc_ranges);
907 }
908 lc_ranges = static_lc_ranges;
909 memcpy(lc_ranges, dt_ranges, rlen);
910 orig_rlen = rlen;
911
912 /* Let's work on a copy of the "ranges" property instead of damaging
913 * the device-tree image in memory
914 */
915 ranges = lc_ranges;
916 prev = NULL;
917 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
918 if (prev) {
919 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
920 (prev[2] + prev[na+4]) == ranges[2] &&
921 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
922 prev[na+4] += ranges[na+4];
923 ranges[0] = 0;
924 ranges += np;
925 continue;
926 }
927 }
928 prev = ranges;
929 ranges += np;
930 }
931
932 /*
933 * The ranges property is laid out as an array of elements,
934 * each of which comprises:
935 * cells 0 - 2: a PCI address
936 * cells 3 or 3+4: a CPU physical address
937 * (size depending on dev->n_addr_cells)
938 * cells 4+5 or 5+6: the size of the range
939 */
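/*
 * For instance, with a single CPU address cell (na == 1, np == 6) a
 * 256MB 32-bit memory range mapped 1:1 could be encoded as
 *	0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x10000000
 * where (ranges[0] >> 24) & 0x3 == 2 selects the memory case below,
 * ranges[na+2] is the CPU address and ranges[na+4] the size.
 */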
940 ranges = lc_ranges;
941 rlen = orig_rlen;
942 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
943 res = NULL;
944 size = ranges[na+4];
945 switch ((ranges[0] >> 24) & 0x3) {
946 case 1: /* I/O space */
947 if (ranges[2] != 0)
948 break;
949 hose->io_base_phys = ranges[na+2];
950 /* limit I/O space to 16MB */
951 if (size > 0x01000000)
952 size = 0x01000000;
953 hose->io_base_virt = ioremap(ranges[na+2], size);
954 if (primary)
955 isa_io_base = (unsigned long) hose->io_base_virt;
956 res = &hose->io_resource;
957 res->flags = IORESOURCE_IO;
958 res->start = ranges[2];
959 DBG("PCI: IO 0x%llx -> 0x%llx\n",
960 res->start, res->start + size - 1);
961 break;
962 case 2: /* memory space */
963 memno = 0;
964 if (ranges[1] == 0 && ranges[2] == 0
965 && ranges[na+4] <= (16 << 20)) {
966 /* 1st 16MB, i.e. ISA memory area */
967 if (primary)
968 isa_mem_base = ranges[na+2];
969 memno = 1;
970 }
971 while (memno < 3 && hose->mem_resources[memno].flags)
972 ++memno;
973 if (memno == 0)
974 hose->pci_mem_offset = ranges[na+2] - ranges[2];
975 if (memno < 3) {
976 res = &hose->mem_resources[memno];
977 res->flags = IORESOURCE_MEM;
978 if(ranges[0] & 0x40000000)
979 res->flags |= IORESOURCE_PREFETCH;
980 res->start = ranges[na+2];
981 DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
982 res->start, res->start + size - 1);
983 }
984 break;
985 }
986 if (res != NULL) {
987 res->name = dev->full_name;
988 res->end = res->start + size - 1;
989 res->parent = NULL;
990 res->sibling = NULL;
991 res->child = NULL;
992 }
993 ranges += np;
994 }
995}
996
997/* We create the "pci-OF-bus-map" property now so it appears in the
998 * /proc device tree
999 */
1000void __init
1001pci_create_OF_bus_map(void)
1002{
1003 struct property* of_prop;
1004
1005 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256);
1006 if (of_prop && find_path_device("/")) {
1007 memset(of_prop, -1, sizeof(struct property) + 256);
1008 of_prop->name = "pci-OF-bus-map";
1009 of_prop->length = 256;
1010 of_prop->value = (unsigned char *)&of_prop[1];
1011 prom_add_property(find_path_device("/"), of_prop);
1012 }
1013}
1014
1015static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
1016{
1017 struct pci_dev *pdev;
1018 struct device_node *np;
1019
1020 pdev = to_pci_dev (dev);
1021 np = pci_device_to_OF_node(pdev);
1022 if (np == NULL || np->full_name == NULL)
1023 return 0;
1024 return sprintf(buf, "%s", np->full_name);
1025}
1026static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1027
1028#else /* CONFIG_PPC_OF */
1029void pcibios_make_OF_bus_map(void)
1030{
1031}
1032#endif /* CONFIG_PPC_OF */
1033
1034/* Add sysfs properties */
1035void pcibios_add_platform_entries(struct pci_dev *pdev)
1036{
1037#ifdef CONFIG_PPC_OF
1038 device_create_file(&pdev->dev, &dev_attr_devspec);
1039#endif /* CONFIG_PPC_OF */
1040}
1041
1042
1043#ifdef CONFIG_PPC_PMAC
1044/*
1045 * This set of routines checks for PCI<->PCI bridges that have closed
1046 * IO resources and have child devices. It tries to re-open an IO
1047 * window on them.
1048 *
1049 * This is a _temporary_ fix to workaround a problem with Apple's OF
1050 * closing IO windows on P2P bridges when the OF drivers of cards
1051 * below this bridge don't claim any IO range (typically ATI or
1052 * Adaptec).
1053 *
1054 * A more complete fix would be to use drivers/pci/setup-bus.c, which
1055 * involves a working pcibios_fixup_pbus_ranges(), some more care about
1056 * ordering when creating the host bus resources, and maybe a few more
1057 * minor tweaks
1058 */
1059
1060/* Initialize bridges with base/limit values we have collected */
1061static void __init
1062do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
1063{
1064 struct pci_dev *bridge = bus->self;
1065 struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
1066 u32 l;
1067 u16 w;
1068 struct resource res;
1069
1070 if (bus->resource[0] == NULL)
1071 return;
1072 res = *(bus->resource[0]);
1073
1074 DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
1075 res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
1076 res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
1077 DBG(" IO window: %016llx-%016llx\n", res.start, res.end);
1078
1079 /* Set up the top and bottom of the PCI I/O segment for this bus. */
1080 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
1081 l &= 0xffff000f;
1082 l |= (res.start >> 8) & 0x00f0;
1083 l |= res.end & 0xf000;
1084 pci_write_config_dword(bridge, PCI_IO_BASE, l);
1085
1086 if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
1087 l = (res.start >> 16) | (res.end & 0xffff0000);
1088 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
1089 }
1090
1091 pci_read_config_word(bridge, PCI_COMMAND, &w);
1092 w |= PCI_COMMAND_IO;
1093 pci_write_config_word(bridge, PCI_COMMAND, w);
1094
1095#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
1096 if (enable_vga) {
1097 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
1098 w |= PCI_BRIDGE_CTL_VGA;
1099 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
1100 }
1101#endif
1102}
1103
1104/* This function is pretty basic and actually quite broken for the
1105 * general case, it's enough for us right now though. It's supposed
1106 * to tell us if we need to open an IO range at all or not and what
1107 * size.
1108 */
1109static int __init
1110check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
1111{
1112 struct pci_dev *dev;
1113 int i;
1114 int rc = 0;
1115
1116#define push_end(res, mask) do { \
1117 BUG_ON((mask+1) & mask); \
1118 res->end = (res->end + mask) | mask; \
1119} while (0)
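/*
 * push_end() extends the I/O window and keeps its end aligned to the
 * mask: for example, with res->end == 0x0fff, push_end(res, 0xfff)
 * gives (0x0fff + 0xfff) | 0xfff == 0x1fff, i.e. another 4K of
 * aligned I/O space.
 */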
1120
1121 list_for_each_entry(dev, &bus->devices, bus_list) {
1122 u16 class = dev->class >> 8;
1123
1124 if (class == PCI_CLASS_DISPLAY_VGA ||
1125 class == PCI_CLASS_NOT_DEFINED_VGA)
1126 *found_vga = 1;
1127 if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
1128 rc |= check_for_io_childs(dev->subordinate, res, found_vga);
1129 if (class == PCI_CLASS_BRIDGE_CARDBUS)
1130 push_end(res, 0xfff);
1131
1132 for (i=0; i<PCI_NUM_RESOURCES; i++) {
1133 struct resource *r;
1134 unsigned long r_size;
1135
1136 if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
1137 && i >= PCI_BRIDGE_RESOURCES)
1138 continue;
1139 r = &dev->resource[i];
1140 r_size = r->end - r->start;
1141 if (r_size < 0xfff)
1142 r_size = 0xfff;
1143 if (r->flags & IORESOURCE_IO && (r_size) != 0) {
1144 rc = 1;
1145 push_end(res, r_size);
1146 }
1147 }
1148 }
1149
1150 return rc;
1151}
1152
1153/* Here we scan all P2P bridges of a given level that have a closed
1154 * IO window. Note that the test for the presence of a VGA card should
1155 * be improved to take into account already configured P2P bridges,
1156 * currently, we don't see them and might end up configuring 2 bridges
1157 * with VGA pass through enabled
1158 */
1159static void __init
1160do_fixup_p2p_level(struct pci_bus *bus)
1161{
1162 struct pci_bus *b;
1163 int i, parent_io;
1164 int has_vga = 0;
1165
1166 for (parent_io=0; parent_io<4; parent_io++)
1167 if (bus->resource[parent_io]
1168 && bus->resource[parent_io]->flags & IORESOURCE_IO)
1169 break;
1170 if (parent_io >= 4)
1171 return;
1172
1173 list_for_each_entry(b, &bus->children, node) {
1174 struct pci_dev *d = b->self;
1175 struct pci_controller* hose = (struct pci_controller *)d->sysdata;
1176 struct resource *res = b->resource[0];
1177 struct resource tmp_res;
1178 unsigned long max;
1179 int found_vga = 0;
1180
1181 memset(&tmp_res, 0, sizeof(tmp_res));
1182 tmp_res.start = bus->resource[parent_io]->start;
1183
1184 /* We don't let low addresses go through that closed P2P bridge, well,
1185 * that may not be necessary but I feel safer that way
1186 */
1187 if (tmp_res.start == 0)
1188 tmp_res.start = 0x1000;
1189
1190 if (!list_empty(&b->devices) && res && res->flags == 0 &&
1191 res != bus->resource[parent_io] &&
1192 (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
1193 check_for_io_childs(b, &tmp_res, &found_vga)) {
1194 u8 io_base_lo;
1195
1196 printk(KERN_INFO "Fixing up IO bus %s\n", b->name);
1197
1198 if (found_vga) {
1199 if (has_vga) {
1200 printk(KERN_WARNING "Skipping VGA, already active"
1201 " on bus segment\n");
1202 found_vga = 0;
1203 } else
1204 has_vga = 1;
1205 }
1206 pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);
1207
1208 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
1209 max = ((unsigned long) hose->io_base_virt
1210 - isa_io_base) + 0xffffffff;
1211 else
1212 max = ((unsigned long) hose->io_base_virt
1213 - isa_io_base) + 0xffff;
1214
1215 *res = tmp_res;
1216 res->flags = IORESOURCE_IO;
1217 res->name = b->name;
1218
1219 /* Find a resource in the parent where we can allocate */
1220 for (i = 0 ; i < 4; i++) {
1221 struct resource *r = bus->resource[i];
1222 if (!r)
1223 continue;
1224 if ((r->flags & IORESOURCE_IO) == 0)
1225 continue;
1226 DBG("Trying to allocate from %016llx, size %016llx from parent"
1227 " res %d: %016llx -> %016llx\n",
1228 res->start, res->end, i, r->start, r->end);
1229
1230 if (allocate_resource(r, res, res->end + 1, res->start, max,
1231 res->end + 1, NULL, NULL) < 0) {
1232 DBG("Failed !\n");
1233 continue;
1234 }
1235 do_update_p2p_io_resource(b, found_vga);
1236 break;
1237 }
1238 }
1239 do_fixup_p2p_level(b);
1240 }
1241}
1242
1243static void
1244pcibios_fixup_p2p_bridges(void)
1245{
1246 struct pci_bus *b;
1247
1248 list_for_each_entry(b, &pci_root_buses, node)
1249 do_fixup_p2p_level(b);
1250}
1251
1252#endif /* CONFIG_PPC_PMAC */
1253
1254static int __init
1255pcibios_init(void)
1256{
1257 struct pci_controller *hose;
1258 struct pci_bus *bus;
1259 int next_busno;
1260
1261 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1262
1263 /* Scan all of the recorded PCI controllers. */
1264 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1265 if (pci_assign_all_buses)
1266 hose->first_busno = next_busno;
1267 hose->last_busno = 0xff;
1268 bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1269 hose->last_busno = bus->subordinate;
1270 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1271 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1272 }
1273 pci_bus_count = next_busno;
1274
1275 /* OpenFirmware based machines need a map of OF bus
1276 * numbers vs. kernel bus numbers since we may have to
1277 * remap them.
1278 */
1279 if (pci_assign_all_buses && have_of)
1280 pcibios_make_OF_bus_map();
1281
1282 /* Do machine dependent PCI interrupt routing */
1283 if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
1284 pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
1285
1286 /* Call machine dependent fixup */
1287 if (ppc_md.pcibios_fixup)
1288 ppc_md.pcibios_fixup();
1289
1290 /* Allocate and assign resources */
1291 pcibios_allocate_bus_resources(&pci_root_buses);
1292 pcibios_allocate_resources(0);
1293 pcibios_allocate_resources(1);
1294#ifdef CONFIG_PPC_PMAC
1295 pcibios_fixup_p2p_bridges();
1296#endif /* CONFIG_PPC_PMAC */
1297 pcibios_assign_resources();
1298
1299 /* Call machine dependent post-init code */
1300 if (ppc_md.pcibios_after_init)
1301 ppc_md.pcibios_after_init();
1302
1303 return 0;
1304}
1305
1306subsys_initcall(pcibios_init);
1307
1308unsigned char __init
1309common_swizzle(struct pci_dev *dev, unsigned char *pinp)
1310{
1311 struct pci_controller *hose = dev->sysdata;
1312
1313 if (dev->bus->number != hose->first_busno) {
1314 u8 pin = *pinp;
1315 do {
1316 pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
1317 /* Move up the chain of bridges. */
1318 dev = dev->bus->self;
1319 } while (dev->bus->self);
1320 *pinp = pin;
1321
1322 /* The slot is the idsel of the last bridge. */
1323 }
1324 return PCI_SLOT(dev->devfn);
1325}
1326
1327unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
1328 unsigned long start, unsigned long size)
1329{
1330 return start;
1331}
1332
1333void __init pcibios_fixup_bus(struct pci_bus *bus)
1334{
1335 struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
1336 unsigned long io_offset;
1337 struct resource *res;
1338 int i;
1339
1340 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1341 if (bus->parent == NULL) {
1342 /* This is a host bridge - fill in its resources */
1343 hose->bus = bus;
1344
1345 bus->resource[0] = res = &hose->io_resource;
1346 if (!res->flags) {
1347 if (io_offset)
1348 printk(KERN_ERR "I/O resource not set for host"
1349 " bridge %d\n", hose->index);
1350 res->start = 0;
1351 res->end = IO_SPACE_LIMIT;
1352 res->flags = IORESOURCE_IO;
1353 }
1354 res->start += io_offset;
1355 res->end += io_offset;
1356
1357 for (i = 0; i < 3; ++i) {
1358 res = &hose->mem_resources[i];
1359 if (!res->flags) {
1360 if (i > 0)
1361 continue;
1362 printk(KERN_ERR "Memory resource not set for "
1363 "host bridge %d\n", hose->index);
1364 res->start = hose->pci_mem_offset;
1365 res->end = ~0U;
1366 res->flags = IORESOURCE_MEM;
1367 }
1368 bus->resource[i+1] = res;
1369 }
1370 } else {
1371 /* This is a subordinate bridge */
1372 pci_read_bridge_bases(bus);
1373
1374 for (i = 0; i < 4; ++i) {
1375 if ((res = bus->resource[i]) == NULL)
1376 continue;
1377 if (!res->flags)
1378 continue;
1379 if (io_offset && (res->flags & IORESOURCE_IO)) {
1380 res->start += io_offset;
1381 res->end += io_offset;
1382 } else if (hose->pci_mem_offset
1383 && (res->flags & IORESOURCE_MEM)) {
1384 res->start += hose->pci_mem_offset;
1385 res->end += hose->pci_mem_offset;
1386 }
1387 }
1388 }
1389
1390 if (ppc_md.pcibios_fixup_bus)
1391 ppc_md.pcibios_fixup_bus(bus);
1392}
1393
1394char __init *pcibios_setup(char *str)
1395{
1396 return str;
1397}
1398
1399/* the next one is stolen from the alpha port... */
1400void __init
1401pcibios_update_irq(struct pci_dev *dev, int irq)
1402{
1403 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
1404 /* XXX FIXME - update OF device tree node interrupt property */
1405}
1406
1407#ifdef CONFIG_PPC_MERGE
1408/* XXX This is a copy of the ppc64 version. This is temporary until we start
1409 * merging the 2 PCI layers
1410 */
1411/*
1412 * Reads the interrupt pin to determine if interrupt is use by card.
1413 * If the interrupt is used, then gets the interrupt line from the
1414 * openfirmware and sets it in the pci_dev and pci_config line.
1415 */
1416int pci_read_irq_line(struct pci_dev *pci_dev)
1417{
1418 struct of_irq oirq;
1419 unsigned int virq;
1420
1421 DBG("Try to map irq for %s...\n", pci_name(pci_dev));
1422
1423 if (of_irq_map_pci(pci_dev, &oirq)) {
1424 DBG(" -> failed !\n");
1425 return -1;
1426 }
1427
1428 DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
1429 oirq.size, oirq.specifier[0], oirq.controller->full_name);
1430
1431 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
1432 if(virq == NO_IRQ) {
1433 DBG(" -> failed to map !\n");
1434 return -1;
1435 }
1436 pci_dev->irq = virq;
1437 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
1438
1439 return 0;
1440}
1441EXPORT_SYMBOL(pci_read_irq_line);
1442#endif /* CONFIG_PPC_MERGE */
1443
1444int pcibios_enable_device(struct pci_dev *dev, int mask)
1445{
1446 u16 cmd, old_cmd;
1447 int idx;
1448 struct resource *r;
1449
1450 if (ppc_md.pcibios_enable_device_hook)
1451 if (ppc_md.pcibios_enable_device_hook(dev, 0))
1452 return -EINVAL;
1453
1454 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1455 old_cmd = cmd;
1456 for (idx=0; idx<6; idx++) {
1457 r = &dev->resource[idx];
1458 if (r->flags & IORESOURCE_UNSET) {
1459 printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
1460 return -EINVAL;
1461 }
1462 if (r->flags & IORESOURCE_IO)
1463 cmd |= PCI_COMMAND_IO;
1464 if (r->flags & IORESOURCE_MEM)
1465 cmd |= PCI_COMMAND_MEMORY;
1466 }
1467 if (cmd != old_cmd) {
1468 printk("PCI: Enabling device %s (%04x -> %04x)\n",
1469 pci_name(dev), old_cmd, cmd);
1470 pci_write_config_word(dev, PCI_COMMAND, cmd);
1471 }
1472 return 0;
1473}
1474
1475struct pci_controller*
1476pci_bus_to_hose(int bus)
1477{
1478 struct pci_controller* hose = hose_head;
1479
1480 for (; hose; hose = hose->next)
1481 if (bus >= hose->first_busno && bus <= hose->last_busno)
1482 return hose;
1483 return NULL;
1484}
1485
1486void __iomem *
1487pci_bus_io_base(unsigned int bus)
1488{
1489 struct pci_controller *hose;
1490
1491 hose = pci_bus_to_hose(bus);
1492 if (!hose)
1493 return NULL;
1494 return hose->io_base_virt;
1495}
1496
1497unsigned long
1498pci_bus_io_base_phys(unsigned int bus)
1499{
1500 struct pci_controller *hose;
1501
1502 hose = pci_bus_to_hose(bus);
1503 if (!hose)
1504 return 0;
1505 return hose->io_base_phys;
1506}
1507
1508unsigned long
1509pci_bus_mem_base_phys(unsigned int bus)
1510{
1511 struct pci_controller *hose;
1512
1513 hose = pci_bus_to_hose(bus);
1514 if (!hose)
1515 return 0;
1516 return hose->pci_mem_offset;
1517}
1518
1519unsigned long
1520pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
1521{
1522 /* Hack alert again ! See comments in chrp_pci.c
1523 */
1524 struct pci_controller* hose =
1525 (struct pci_controller *)pdev->sysdata;
1526 if (hose && res->flags & IORESOURCE_MEM)
1527 return res->start - hose->pci_mem_offset;
1528 /* We may want to do something with IOs here... */
1529 return res->start;
1530}
1531
1532
1533static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
1534 unsigned long *offset,
1535 enum pci_mmap_state mmap_state)
1536{
1537 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1538 unsigned long io_offset = 0;
1539 int i, res_bit;
1540
1541 if (hose == 0)
1542 return NULL; /* should never happen */
1543
1544 /* If memory, add on the PCI bridge address offset */
1545 if (mmap_state == pci_mmap_mem) {
1546 *offset += hose->pci_mem_offset;
1547 res_bit = IORESOURCE_MEM;
1548 } else {
1549 io_offset = hose->io_base_virt - ___IO_BASE;
1550 *offset += io_offset;
1551 res_bit = IORESOURCE_IO;
1552 }
1553
1554 /*
1555 * Check that the offset requested corresponds to one of the
1556 * resources of the device.
1557 */
1558 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1559 struct resource *rp = &dev->resource[i];
1560 int flags = rp->flags;
1561
1562 /* treat ROM as memory (should be already) */
1563 if (i == PCI_ROM_RESOURCE)
1564 flags |= IORESOURCE_MEM;
1565
1566 /* Active and same type? */
1567 if ((flags & res_bit) == 0)
1568 continue;
1569
1570 /* In the range of this resource? */
1571 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
1572 continue;
1573
1574 /* found it! construct the final physical address */
1575 if (mmap_state == pci_mmap_io)
1576 *offset += hose->io_base_phys - io_offset;
1577 return rp;
1578 }
1579
1580 return NULL;
1581}
1582
1583/*
1584 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
1585 * device mapping.
1586 */
1587static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1588 pgprot_t protection,
1589 enum pci_mmap_state mmap_state,
1590 int write_combine)
1591{
1592 unsigned long prot = pgprot_val(protection);
1593
1594 /* Write combine is always 0 on non-memory space mappings. On
1595 * memory space, if the user didn't pass 1, we check for a
1596 * "prefetchable" resource. This is a bit hackish, but we use
1597 * this to workaround the inability of /sysfs to provide a write
1598 * combine bit
1599 */
1600 if (mmap_state != pci_mmap_mem)
1601 write_combine = 0;
1602 else if (write_combine == 0) {
1603 if (rp->flags & IORESOURCE_PREFETCH)
1604 write_combine = 1;
1605 }
1606
1607 /* XXX would be nice to have a way to ask for write-through */
1608 prot |= _PAGE_NO_CACHE;
1609 if (write_combine)
1610 prot &= ~_PAGE_GUARDED;
1611 else
1612 prot |= _PAGE_GUARDED;
1613
1614 printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
1615 (unsigned long long)rp->start, prot);
1616
1617 return __pgprot(prot);
1618}
1619
1620/*
1621 * This one is used by /dev/mem and fbdev who have no clue about the
1622 * PCI device, it tries to find the PCI device first and calls the
1623 * above routine
1624 */
1625pgprot_t pci_phys_mem_access_prot(struct file *file,
1626 unsigned long pfn,
1627 unsigned long size,
1628 pgprot_t protection)
1629{
1630 struct pci_dev *pdev = NULL;
1631 struct resource *found = NULL;
1632 unsigned long prot = pgprot_val(protection);
1633 unsigned long offset = pfn << PAGE_SHIFT;
1634 int i;
1635
1636 if (page_is_ram(pfn))
1637 return prot;
1638
1639 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
1640
1641 for_each_pci_dev(pdev) {
1642 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
1643 struct resource *rp = &pdev->resource[i];
1644 int flags = rp->flags;
1645
1646 /* Active and same type? */
1647 if ((flags & IORESOURCE_MEM) == 0)
1648 continue;
1649 /* In the range of this resource? */
1650 if (offset < (rp->start & PAGE_MASK) ||
1651 offset > rp->end)
1652 continue;
1653 found = rp;
1654 break;
1655 }
1656 if (found)
1657 break;
1658 }
1659 if (found) {
1660 if (found->flags & IORESOURCE_PREFETCH)
1661 prot &= ~_PAGE_GUARDED;
1662 pci_dev_put(pdev);
1663 }
1664
1665 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
1666
1667 return __pgprot(prot);
1668}
1669
1670
1671/*
1672 * Perform the actual remap of the pages for a PCI device mapping, as
1673 * appropriate for this architecture. The region in the process to map
1674 * is described by vm_start and vm_end members of VMA, the base physical
1675 * address is found in vm_pgoff.
1676 * The pci device structure is provided so that architectures may make mapping
1677 * decisions on a per-device or per-bus basis.
1678 *
1679 * Returns a negative error code on failure, zero on success.
1680 */
1681int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1682 enum pci_mmap_state mmap_state,
1683 int write_combine)
1684{
1685 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
1686 struct resource *rp;
1687 int ret;
1688
1689 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
1690 if (rp == NULL)
1691 return -EINVAL;
1692
1693 vma->vm_pgoff = offset >> PAGE_SHIFT;
1694 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1695 vma->vm_page_prot,
1696 mmap_state, write_combine);
1697
1698 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1699 vma->vm_end - vma->vm_start, vma->vm_page_prot);
1700
1701 return ret;
1702}
1703
1704/* Obsolete functions. Should be removed once the symbios driver
1705 * is fixed
1706 */
1707unsigned long
1708phys_to_bus(unsigned long pa)
1709{
1710 struct pci_controller *hose;
1711 int i;
1712
1713 for (hose = hose_head; hose; hose = hose->next) {
1714 for (i = 0; i < 3; ++i) {
1715 if (pa >= hose->mem_resources[i].start
1716 && pa <= hose->mem_resources[i].end) {
1717 /*
1718 * XXX the hose->pci_mem_offset really
1719 * only applies to mem_resources[0].
1720 * We need a way to store an offset for
1721 * the others. -- paulus
1722 */
1723 if (i == 0)
1724 pa -= hose->pci_mem_offset;
1725 return pa;
1726 }
1727 }
1728 }
1729 /* hmmm, didn't find it */
1730 return 0;
1731}
1732
1733unsigned long
1734pci_phys_to_bus(unsigned long pa, int busnr)
1735{
1736 struct pci_controller* hose = pci_bus_to_hose(busnr);
1737 if (!hose)
1738 return pa;
1739 return pa - hose->pci_mem_offset;
1740}
1741
1742unsigned long
1743pci_bus_to_phys(unsigned int ba, int busnr)
1744{
1745 struct pci_controller* hose = pci_bus_to_hose(busnr);
1746 if (!hose)
1747 return ba;
1748 return ba + hose->pci_mem_offset;
1749}
1750
1751/* Provide information on locations of various I/O regions in physical
1752 * memory. Do this on a per-card basis so that we choose the right
1753 * root bridge.
1754 * Note that the returned IO or memory base is a physical address
1755 */
1756
1757long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1758{
1759 struct pci_controller* hose;
1760 long result = -EOPNOTSUPP;
1761
1762 /* Argh ! Please forgive me for that hack, but that's the
1763 * simplest way to get existing XFree to not lockup on some
1764 * G5 machines... So when something asks for bus 0 io base
1765 * (bus 0 is HT root), we return the AGP one instead.
1766 */
1767#ifdef CONFIG_PPC_PMAC
1768 if (machine_is(powermac) && machine_is_compatible("MacRISC4"))
1769 if (bus == 0)
1770 bus = 0xf0;
1771#endif /* CONFIG_PPC_PMAC */
1772
1773 hose = pci_bus_to_hose(bus);
1774 if (!hose)
1775 return -ENODEV;
1776
1777 switch (which) {
1778 case IOBASE_BRIDGE_NUMBER:
1779 return (long)hose->first_busno;
1780 case IOBASE_MEMORY:
1781 return (long)hose->pci_mem_offset;
1782 case IOBASE_IO:
1783 return (long)hose->io_base_phys;
1784 case IOBASE_ISA_IO:
1785 return (long)isa_io_base;
1786 case IOBASE_ISA_MEM:
1787 return (long)isa_mem_base;
1788 }
1789
1790 return result;
1791}
1792
1793void pci_resource_to_user(const struct pci_dev *dev, int bar,
1794 const struct resource *rsrc,
1795 resource_size_t *start, resource_size_t *end)
1796{
1797 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
1798 unsigned long offset = 0;
1799
1800 if (hose == NULL)
1801 return;
1802
1803 if (rsrc->flags & IORESOURCE_IO)
1804 offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
1805
1806 *start = rsrc->start + offset;
1807 *end = rsrc->end + offset;
1808}
1809
1810void __init
1811pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
1812 int flags, char *name)
1813{
1814 res->start = start;
1815 res->end = end;
1816 res->flags = flags;
1817 res->name = name;
1818 res->parent = NULL;
1819 res->sibling = NULL;
1820 res->child = NULL;
1821}
1822
1823void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
1824{
1825 unsigned long start = pci_resource_start(dev, bar);
1826 unsigned long len = pci_resource_len(dev, bar);
1827 unsigned long flags = pci_resource_flags(dev, bar);
1828
1829 if (!len)
1830 return NULL;
1831 if (max && len > max)
1832 len = max;
1833 if (flags & IORESOURCE_IO)
1834 return ioport_map(start, len);
1835 if (flags & IORESOURCE_MEM)
1836 /* Not checking IORESOURCE_CACHEABLE because PPC does
1837 * not currently distinguish between ioremap and
1838 * ioremap_nocache.
1839 */
1840 return ioremap(start, len);
1841 /* What? */
1842 return NULL;
1843}
1844
1845void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
1846{
1847 /* Nothing to do */
1848}
1849EXPORT_SYMBOL(pci_iomap);
1850EXPORT_SYMBOL(pci_iounmap);
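/*
 * Typical driver-side use of the helpers above (sketch, assuming
 * BAR 0 is a memory BAR and REG_OFFSET is a device register):
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + REG_OFFSET);
 *	pci_iounmap(pdev, regs);
 */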
1851
1852unsigned long pci_address_to_pio(phys_addr_t address)
1853{
1854 struct pci_controller* hose = hose_head;
1855
1856 for (; hose; hose = hose->next) {
1857 unsigned int size = hose->io_resource.end -
1858 hose->io_resource.start + 1;
1859 if (address >= hose->io_base_phys &&
1860 address < (hose->io_base_phys + size)) {
1861 unsigned long base =
1862 (unsigned long)hose->io_base_virt - _IO_BASE;
1863 return base + (address - hose->io_base_phys);
1864 }
1865 }
1866 return (unsigned int)-1;
1867}
1868EXPORT_SYMBOL(pci_address_to_pio);
1869
1870/*
1871 * Null PCI config access functions, for the case when we can't
1872 * find a hose.
1873 */
1874#define NULL_PCI_OP(rw, size, type) \
1875static int \
1876null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1877{ \
1878 return PCIBIOS_DEVICE_NOT_FOUND; \
1879}
1880
1881static int
1882null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1883 int len, u32 *val)
1884{
1885 return PCIBIOS_DEVICE_NOT_FOUND;
1886}
1887
1888static int
1889null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1890 int len, u32 val)
1891{
1892 return PCIBIOS_DEVICE_NOT_FOUND;
1893}
1894
1895static struct pci_ops null_pci_ops =
1896{
1897 null_read_config,
1898 null_write_config
1899};
1900
1901/*
1902 * These functions are used early on before PCI scanning is done
1903 * and all of the pci_dev and pci_bus structures have been created.
1904 */
1905static struct pci_bus *
1906fake_pci_bus(struct pci_controller *hose, int busnr)
1907{
1908 static struct pci_bus bus;
1909
1910 if (hose == 0) {
1911 hose = pci_bus_to_hose(busnr);
1912 if (hose == 0)
1913 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1914 }
1915 bus.number = busnr;
1916 bus.sysdata = hose;
1917 bus.ops = hose? hose->ops: &null_pci_ops;
1918 return &bus;
1919}
1920
1921#define EARLY_PCI_OP(rw, size, type) \
1922int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1923 int devfn, int offset, type value) \
1924{ \
1925 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1926 devfn, offset, value); \
1927}
1928
1929EARLY_PCI_OP(read, byte, u8 *)
1930EARLY_PCI_OP(read, word, u16 *)
1931EARLY_PCI_OP(read, dword, u32 *)
1932EARLY_PCI_OP(write, byte, u8)
1933EARLY_PCI_OP(write, word, u16)
1934EARLY_PCI_OP(write, dword, u32)
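/*
 * The EARLY_PCI_OP() expansions above provide helpers such as
 *	int early_read_config_word(struct pci_controller *hose, int bus,
 *				   int devfn, int offset, u16 *value);
 * which platform setup code can call before the PCI buses are
 * scanned, e.g. (sketch, values hypothetical):
 *	u16 vendor;
 *	early_read_config_word(hose, 0, PCI_DEVFN(0x1f, 0),
 *			       PCI_VENDOR_ID, &vendor);
 */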