/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
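
/*
 * Illustrative example (not part of the original file): for seg 0, bus 2,
 * devfn 0x10, reg 0x40, PCI_SAL_ADDRESS() yields
 * (0 << 24) | (2 << 16) | (0x10 << 8) | 0x40 == 0x021040.  The extended
 * form widens each field by 4 bits so the 12-bit register offset can
 * address the full 4K PCIe extended configuration space.
 */
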
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
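
/*
 * Illustrative usage (not part of the original file): reading the 16-bit
 * vendor ID of the device at segment 0, bus 0, slot 0, function 0:
 *
 *	u32 id;
 *	raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &id);
 */
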
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* Called by ACPI when it finds a new root bus. */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}

struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	struct list_head resources;
	char *name;
};

static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
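
/*
 * Note (added for clarity): io_space[] and num_io_spaces form the
 * ia64-global table of I/O port spaces; the index returned here is later
 * turned into a port-space base address via IO_SPACE_BASE() in
 * add_io_space() below.
 */
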
static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}

	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}

	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}
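
/*
 * Note (added for clarity): IO_SPACE_SPARSE_ENCODING() comes from
 * <asm/io.h>; for a sparse space it spreads the port number out across
 * the MMIO address range so only a few bytes of each page decode as
 * ports.  The start/end values above are therefore MMIO addresses, which
 * is why the resource is inserted into iomem_resource rather than
 * ioport_resource.
 */
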
static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
	struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}

static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}

static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;

	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->offset = offset;

	if (insert_resource(root, &window->resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			&window->resource);
	} else {
		if (offset)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 &window->resource,
				 window->resource.start - offset,
				 window->resource.end - offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n",
				 &window->resource);
	}

	/* HP's firmware has a hack to work around a Windows bug.
	 * Ignore these tiny memory ranges */
	if (!((window->resource.flags & IORESOURCE_MEM) &&
	      (window->resource.end - window->resource.start < 16)))
		pci_add_resource_offset(&info->resources, &window->resource,
					window->offset);

	return AE_OK;
}
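
/*
 * Note (added for clarity): window->offset records the bus-to-CPU
 * translation for this window -- the CPU-side window start is the PCI bus
 * address (addr.minimum) plus this offset -- which is why the dev_info()
 * above prints both views whenever the two address spaces differ.
 */
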
struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_root_info info;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	INIT_LIST_HEAD(&info.resources);
	/* insert busn resource at first */
	pci_add_resource(&info.resources, &root->secondary);
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		controller->window =
			kzalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;

		name = kmalloc(16, GFP_KERNEL);
		if (!name)
			goto out3;

		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
		info.bridge = device;
		info.controller = controller;
		info.name = name;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			add_window, &info);
	}
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired PCI bus might already have been scanned in a quirk.
	 * We should handle that case here, but ia64 appears to have no such
	 * quirk, so we simply ignore it for now.
	 */
	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
				   &info.resources);
	if (!pbus) {
		pci_free_resource_list(&info.resources);
		return NULL;
	}

	pci_scan_child_bus(pbus);
	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}
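
/*
 * Note (added for clarity): _CRS is deliberately walked twice above --
 * first with count_window() to size the window array, then with
 * add_window() to fill it in -- so that the array can be allocated in one
 * shot on the controller's NUMA node via kzalloc_node().
 */
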
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx], *busr;

	if (!dev->bus)
		return 0;

	pci_bus_for_each_resource(dev->bus, busr, i) {
		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
				(devr->end <= busr->end))
			return 1;
	}
	return 0;
}

static void __devinit
pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		if ((is_valid_resource(dev, i)))
			pci_claim_resource(dev, i);
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}

void pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device (struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}

resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
			resource_size_t size, resource_size_t align)
{
	return res->start;
}

int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1: *val = inb(port); break;
	case 2: *val = inw(port); break;
	case 4: *val = inl(port); break;
	default: ret = -EINVAL;	  break;
	}

	return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1: outb(val, port); break;
	case 2: outw(val, port); break;
	case 4: outl(val, port); break;
	default: ret = -EINVAL;	 break;
	}

	return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
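
/*
 * Note (added for clarity): pcci_line_size is the log2 of the line size
 * in bytes, so (1 << cci.pcci_line_size) is the line size in bytes and
 * dividing by 4 converts it to the 32-bit dword units that the PCI
 * cacheline-size register expects.
 */
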
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
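
/*
 * Worked example (added for clarity, assuming 16KB pages, PAGE_SHIFT=14):
 * with 4GB of RAM, max_pfn - 1 == 0x3ffff, high_totalram == 0, and the
 * low path rounds 0xffffc000 up to a mask of 0xffffffff (32 bits).  With
 * 8GB, high_totalram == 1 and the mask becomes 0x1ffffffff (33 bits).
 */
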
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);