/*
* Page types allocated by the device.
*/
-enum {
+enum mspec_page_type {
MSPEC_FETCHOP = 1,
MSPEC_CACHED,
MSPEC_UNCACHED
* One of these structures is allocated when an mspec region is mmaped. The
* structure is pointed to by the vma->vm_private_data field in the vma struct.
* This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vmas that are split off from the
+ * original vma when split_vma() is called.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect it in the fork case, where multiple tasks share the vma_data.
*/
struct vma_data {
atomic_t refcnt; /* Number of vmas sharing the data. */
- spinlock_t lock; /* Serialize access to the vma. */
+ spinlock_t lock; /* Serialize access to this structure. */
int count; /* Number of pages allocated. */
- int type; /* Type of pages allocated. */
+ enum mspec_page_type type; /* Type of pages allocated. */
+ int flags; /* See VMD_xxx below. */
+ unsigned long vm_start; /* Original (unsplit) base. */
+ unsigned long vm_end; /* Original (unsplit) end. */
unsigned long maddr[0]; /* Array of MSPEC addresses. */
};
+#define VMD_VMALLOCED 0x1 /* vmalloc'd rather than kmalloc'd */
+
/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES 4
* mspec_open
*
* Called when a device mapping is created by a means other than mmap
- * (via fork, etc.). Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.). Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
*/
static void
mspec_open(struct vm_area_struct *vma)
mspec_close(struct vm_area_struct *vma)
{
struct vma_data *vdata;
- int i, pages, result, vdata_size;
+ int index, last_index, result;
+ unsigned long my_page;
vdata = vma->vm_private_data;
- if (!atomic_dec_and_test(&vdata->refcnt))
- return;
- pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
- for (i = 0; i < pages; i++) {
- if (vdata->maddr[i] == 0)
+ BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+
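+ /*
+ * maddr[] is indexed from the original (unsplit) base recorded in
+ * vdata, so a vma produced by split_vma() still frees the right slots.
+ */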
+ spin_lock(&vdata->lock);
+ index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+ last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (; index < last_index; index++) {
+ if (vdata->maddr[index] == 0)
continue;
/*
* Clear the page before sticking it back
* into the pool.
*/
- result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
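+ /*
+ * Claim the slot under the lock, then drop the lock while the page
+ * is zeroed and returned to the pool; a racing closer sees the
+ * cleared slot and skips it.
+ */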
+ my_page = vdata->maddr[index];
+ vdata->maddr[index] = 0;
+ spin_unlock(&vdata->lock);
+ result = mspec_zero_block(my_page, PAGE_SIZE);
if (!result)
- uncached_free_page(vdata->maddr[i]);
+ uncached_free_page(my_page);
else
printk(KERN_WARNING "mspec_close(): "
"failed to zero page %i\n",
result);
+ spin_lock(&vdata->lock);
}
+ spin_unlock(&vdata->lock);
- if (vdata_size <= PAGE_SIZE)
- kfree(vdata);
- else
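+ /*
+ * Drop our reference; the last closer frees the vdata with the
+ * allocator that created it.
+ */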
+ if (!atomic_dec_and_test(&vdata->refcnt))
+ return;
+
+ if (vdata->flags & VMD_VMALLOCED)
vfree(vdata);
+ else
+ kfree(vdata);
}
int index;
struct vma_data *vdata = vma->vm_private_data;
- index = (address - vma->vm_start) >> PAGE_SHIFT;
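+ /*
+ * The faulting vma may be a piece split off the original mapping, so
+ * compute the index from the recorded (unsplit) base, not vma->vm_start.
+ */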
+ BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+ index = (address - vdata->vm_start) >> PAGE_SHIFT;
maddr = (volatile unsigned long) vdata->maddr[index];
if (maddr == 0) {
maddr = uncached_alloc_page(numa_node_id());
* underlying pages.
*/
static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ enum mspec_page_type type)
{
struct vma_data *vdata;
- int pages, vdata_size;
+ int pages, vdata_size, flags = 0;
if (vma->vm_pgoff != 0)
return -EINVAL;
vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
if (vdata_size <= PAGE_SIZE)
vdata = kmalloc(vdata_size, GFP_KERNEL);
- else
+ else {
vdata = vmalloc(vdata_size);
+ flags = VMD_VMALLOCED;
+ }
if (!vdata)
return -ENOMEM;
memset(vdata, 0, vdata_size);
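+ /*
+ * Record the original extent of the mapping so that later splits can
+ * still index maddr[] correctly.
+ */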
+ vdata->vm_start = vma->vm_start;
+ vdata->vm_end = vma->vm_end;
+ vdata->flags = flags;
vdata->type = type;
spin_lock_init(&vdata->lock);
vdata->refcnt = ATOMIC_INIT(1);