 	return start;
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	if (mmap_state == pci_mmap_io)

 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;

 }

 int
-pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
+pci_mmap_page_range (struct pci_dev *dev, int bar,
+		     struct vm_area_struct *vma,
 		     enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;

  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	resource_size_t offset =

 	*end = rsrc->start + size;
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;

 	pcibios_allocate_resources(1);
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;

 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;

  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	resource_size_t offset =

 	}
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	/*

  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-			enum pci_mmap_state mmap_state,
-			int write_combine)
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
 {
 	int ret;

 	return 0;
 }

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long phys;

 	.access = generic_access_phys,
 };

-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine)
 {
 	unsigned long prot;

  *
  * Returns a negative error code on failure, zero on success.
  */
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state,
 			int write_combine)
 {

 	pci_resource_to_user(pdev, bar, res, &start, &end);
 	vma->vm_pgoff += start >> PAGE_SHIFT;
 	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
-	return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+	return pci_mmap_page_range(pdev, bar, vma, mmap_type, write_combine);
 }

 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
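
For context on the caller side: the sysfs path above already knows which BAR it
is serving, because each resource attribute is created for one specific struct
resource, and the index falls out of matching that pointer against the device's
resource table. Below is a minimal sketch of that lookup; the helper name
pci_bar_index is hypothetical and not part of this patch.

#include <linux/pci.h>

/* Hypothetical helper (illustration only): recover the BAR index from
 * the struct resource a sysfs attribute was registered with, by
 * matching the pointer against the device's resource table. */
static int pci_bar_index(struct pci_dev *pdev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if (res == &pdev->resource[i])
			return i;	/* the 'bar' value passed down */

	return -ENODEV;			/* not a BAR of this device */
}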

 		else
 			return -EINVAL;
 	}
-	ret = pci_mmap_page_range(dev, vma,
+	ret = pci_mmap_page_range(dev, i, vma,
 				  fpriv->mmap_state, write_combine);
 	if (ret < 0)
 		return ret;

 /* Architectures provide this function if they set HAVE_PCI_MMAP, and
  * it accepts the 'write_combine' argument when arch_can_pci_mmap_wc()
  * evaluates to nonzero. */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+			struct vm_area_struct *vma,
 			enum pci_mmap_state mmap_state, int write_combine);

 #ifndef arch_can_pci_mmap_wc
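
With the declaration in place, here is a hedged sketch of what an architecture
implementation can now look like; it is not taken from any in-tree
architecture. Assumptions: vma->vm_pgoff has already been adjusted to the
absolute physical page frame (as the sysfs caller above arranges), the
architecture maps memory BARs only, and write-combining is honored, which per
the comment above only matters where arch_can_pci_mmap_wc() is nonzero. The
new 'bar' argument is used for a sanity check via pci_resource_flags().

#include <linux/mm.h>
#include <linux/pci.h>

/* Illustrative skeleton only -- not any architecture's actual code. */
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
			struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Assumption: this architecture cannot mmap port I/O space. */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/* The new 'bar' argument lets the implementation check the
	 * request against the BAR it is supposed to fall within. */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_pgoff is the absolute PFN here, as the callers above
	 * arrange before calling down. */
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}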