struct vm_operations_struct generic_file_vm_ops = {
};
-EXPORT_SYMBOL(vfree);
-EXPORT_SYMBOL(vmalloc_to_page);
-EXPORT_SYMBOL(vmalloc_32);
-EXPORT_SYMBOL(vmap);
-EXPORT_SYMBOL(vunmap);
-
/*
* Handle all mappings that got truncated by a "truncate()"
* system call.
finish_or_fault:
return i ? : -EFAULT;
}
-
EXPORT_SYMBOL(get_user_pages);
DEFINE_RWLOCK(vmlist_lock);
{
kfree(addr);
}
+EXPORT_SYMBOL(vfree);
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
*/
return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
+EXPORT_SYMBOL(__vmalloc);
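/*
 * Editorial aside -- illustrative sketch, not part of this patch or of
 * mm/nommu.c.  On !MMU kernels there is no separate vmalloc address
 * space, so __vmalloc() above falls straight through to kmalloc();
 * __GFP_HIGHMEM is masked off because kmalloc() can only hand back
 * directly addressable (logical) memory.  The hypothetical
 * example_alloc() below just shows a module-side caller now that the
 * symbol is exported.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *example_alloc(unsigned long size)
{
	/* Same call on MMU and nommu; on nommu the memory is kmalloc-backed. */
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}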
struct page * vmalloc_to_page(void *addr)
{
return virt_to_page(addr);
}
+EXPORT_SYMBOL(vmalloc_to_page);
unsigned long vmalloc_to_pfn(void *addr)
{
return page_to_pfn(virt_to_page(addr));
}
-
+EXPORT_SYMBOL(vmalloc_to_pfn);
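/*
 * Editorial aside -- illustrative sketch, not part of this patch.  Because
 * nommu vmalloc memory is kmalloc-backed and physically contiguous, the
 * page/pfn lookups above reduce to virt_to_page().  A hypothetical caller
 * that needs the physical frame behind a vmalloc'd buffer:
 */
#include <linux/vmalloc.h>

static unsigned long example_buffer_pfn(void *buf)
{
	/* buf is assumed to have come from vmalloc()/__vmalloc() */
	return vmalloc_to_pfn(buf);
}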
long vread(char *buf, char *addr, unsigned long count)
{
}
EXPORT_SYMBOL(vmalloc_node);
-/*
- * vmalloc_32 - allocate virtually continguos memory (32bit addressable)
- *
+/**
+ * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* @size: allocation size
*
* Allocate enough 32bit PA addressable pages to cover @size from the
{
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
+EXPORT_SYMBOL(vmalloc_32);
+
+/**
+ * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * @size: allocation size
+ *
+ * The resulting memory area is 32bit addressable and zeroed so it can be
+ * mapped to userspace without leaking data.
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+ return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(vmalloc_32_user);
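/*
 * Editorial aside -- illustrative sketch, not part of this patch.  The
 * kernel-doc above spells out the contract of the new helper: zeroed,
 * 32bit addressable, safe to expose to userspace.  A hypothetical driver
 * that sets up and tears down such a buffer might do:
 */
#include <linux/errno.h>
#include <linux/vmalloc.h>

static void *example_user_buf;

static int example_buf_setup(unsigned long size)
{
	example_user_buf = vmalloc_32_user(size);	/* zeroed on return */
	if (!example_user_buf)
		return -ENOMEM;
	return 0;
}

static void example_buf_teardown(void)
{
	vfree(example_user_buf);
	example_user_buf = NULL;
}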
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
BUG();
return NULL;
}
+EXPORT_SYMBOL(vmap);
void vunmap(void *addr)
{
BUG();
}
+EXPORT_SYMBOL(vunmap);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
{
}
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_page);
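/*
 * Editorial aside -- illustrative sketch, not part of this patch.  The
 * -EINVAL stub above lets code shared with MMU builds keep calling
 * vm_insert_page() and simply see it fail on nommu.  A hypothetical
 * caller just propagates the error:
 */
#include <linux/mm.h>

static int example_map_one_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	/* Always fails with -EINVAL on nommu kernels. */
	return vm_insert_page(vma, addr, page);
}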
+
/*
* sys_brk() for the most part doesn't need the global kernel
* lock, except when an application is doing something nasty
show_free_areas();
return -ENOMEM;
}
+EXPORT_SYMBOL(do_mmap_pgoff);
/*
* handle mapping disposal for uClinux
return 0;
}
+EXPORT_SYMBOL(do_munmap);
asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
return vma->vm_start;
}
+EXPORT_SYMBOL(do_mremap);
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
return get_area(file, addr, len, pgoff, flags);
}
-
EXPORT_SYMBOL(get_unmapped_area);
/*
BUG();
return 0;
}
+EXPORT_SYMBOL(filemap_fault);
/*
* Access another process' address space.