/* drivers/android/pmem.c
 *
 * Copyright (C) 2007 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#define PMEM_MAX_DEVICES 10
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE
/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED (0x1 << 1)
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP (0x1 << 3)
#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)
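
/* Illustrative walk-through (an addition, not part of the original source):
 * PMEM_FLAGS_SUBMAP and PMEM_FLAGS_UNSUBMAP form the two-bit state machine
 * described above. A sub-allocation moves through it as follows:
 *
 *	flags = 0;				// 00: never mmaped
 *	flags |= PMEM_FLAGS_SUBMAP;		// 10: mmaped, mm ref taken
 *	flags |= PMEM_FLAGS_UNSUBMAP;		// 11: released, mm ref held
 *	flags &= ~PMEM_FLAGS_SUBMAP;		// 01: released, mm ref dropped
 */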
struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field, if the mm_mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops) */
	struct rw_semaphore sem;
	/* info about the mmaping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
	/* reference count taken via get_pmem_file/put_pmem_file */
	int ref;
};
struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};
struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};
#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt, args...) \
	do { \
		printk(KERN_INFO "[%s:%s:%d] " fmt, __FILE__, __func__, \
		       __LINE__, ##args); \
	} while (0)
#else
#define DLOG(x...) do {} while (0)
#endif
struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	unsigned long garbage_index;
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached, if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * this flag */
	unsigned allocated;
	/* for debugging, creates a list of pmem file structs, the
	 * data_list_lock should be taken before pmem_data->sem if both are
	 * needed */
	struct mutex data_list_lock;
	struct list_head data_list;
	/* pmem_sem protects the bitmap array
	 * a write lock should be held when modifying entries in bitmap
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file
	 * Many of the functions that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
	 */
	struct rw_semaphore bitmap_sem;

	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};

static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
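
/* Worked example (an addition, for illustration; assumes PAGE_SIZE is 4096,
 * so PMEM_MIN_ALLOC == 4096): an entry at index 4 with order 2 spans
 * 1 << 2 = 4 minimum-allocation pages, so:
 *
 *	PMEM_OFFSET(4)          == 4 * 4096 == 0x4000
 *	PMEM_LEN(id, 4)         == (1 << 2) * 4096 == 0x4000
 *	PMEM_BUDDY_INDEX(id, 4) == 4 ^ (1 << 2) == 0   (its order-2 buddy)
 *	PMEM_NEXT_INDEX(id, 4)  == 4 + (1 << 2) == 8   (the following slot)
 */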
static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);
struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};
static int get_id(struct file *file)
{
	return MINOR(file->f_dentry->d_inode->i_rdev);
}
int is_pmem_file(struct file *file)
{
	int id;

	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
		return 0;
	id = get_id(file);
	if (unlikely(id >= PMEM_MAX_DEVICES))
		return 0;
	if (unlikely(file->f_dentry->d_inode->i_rdev !=
		     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
		return 0;
	return 1;
}
static int has_allocation(struct file *file)
{
	struct pmem_data *data;
	/* check is_pmem_file first if not accessed via pmem_file_ops */

	if (unlikely(!file->private_data))
		return 0;
	data = (struct pmem_data *)file->private_data;
	if (unlikely(data->index < 0))
		return 0;
	return 1;
}
static int is_master_owner(struct file *file)
{
	struct file *master_file;
	struct pmem_data *data;
	int put_needed, ret = 0;

	if (!is_pmem_file(file) || !has_allocation(file))
		return 0;
	data = (struct pmem_data *)file->private_data;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
		return 1;
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
		ret = 1;
	fput_light(master_file, put_needed);
	return ret;
}
static int pmem_free(int id, int index)
{
	/* caller should hold the write lock on pmem_sem! */
	int buddy, curr = index;
	DLOG("index %d\n", index);

	if (pmem[id].no_allocator) {
		pmem[id].allocated = 0;
		return 0;
	}
	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
	do {
		buddy = PMEM_BUDDY_INDEX(id, curr);
		if (PMEM_IS_FREE(id, buddy) &&
		    PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}
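
/* Illustrative merge (an addition, not part of the original source): freeing
 * index 4 when indices 0 and 4 both have order 2 and index 0 is already
 * free: buddy = 4 ^ (1 << 2) = 0, the orders match, so both become order 3
 * and curr = min(0, 4) = 0. The next pass checks the buddy at
 * 0 ^ (1 << 3) = 8 and merges again only if slot 8 is a free order-3 run. */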
static void pmem_revoke(struct file *file, struct pmem_data *data);
static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;

	mutex_lock(&pmem[id].data_list_lock);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
			} else
				up_read(&sub_data->sem);
		}
	}
	list_del(&data->list);
	mutex_unlock(&pmem[id].data_list_lock);

	down_write(&data->sem);

	/* if it's not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		put_task_struct(data->task);

	file->private_data = NULL;

	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);

	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}
static int pmem_open(struct inode *inode, struct file *file)
{
	struct pmem_data *data;
	int id = get_id(file);

	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
	/* setup file->private_data to indicate it's unmapped */
	/* you can only open a pmem device one time */
	if (file->private_data != NULL)
		return -1;
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
	if (!data) {
		printk("pmem: unable to allocate memory for pmem metadata.");
		return -1;
	}
	data->flags = 0;
	data->index = -1;
	data->vma = NULL;
	data->task = NULL;
	data->pid = 0;
	data->master_fd = -1;
	data->master_file = NULL;
	data->ref = 0;
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);

	file->private_data = data;
	INIT_LIST_HEAD(&data->list);

	mutex_lock(&pmem[id].data_list_lock);
	list_add(&data->list, &pmem[id].data_list);
	mutex_unlock(&pmem[id].data_list_lock);
	return 0;
}
static unsigned long pmem_order(unsigned long len)
{
	int i;

	len = (len + PMEM_MIN_ALLOC - 1) / PMEM_MIN_ALLOC;
	len--;
	for (i = 0; i < sizeof(len) * 8; i++)
		if (len >> i == 0)
			break;
	return i;
}
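
/* Worked example (an addition; assumes PAGE_SIZE is 4096): a request of
 * len = 20480 bytes covers (20480 + 4095) / 4096 = 5 minimum-allocation
 * pages; after len-- the loop finds the first i with 4 >> i == 0, which is
 * i = 3, so the request is rounded up to a 1 << 3 = 8 page (32 KB) slot. */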
static int pmem_allocate(int id, unsigned long len)
{
	/* caller should hold the write lock on pmem_sem! */
	/* return the corresponding pdata[] entry */
	int curr = 0;
	int end = pmem[id].num_entries;
	int best_fit = -1;
	unsigned long order = pmem_order(len);

	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		if ((len > pmem[id].size) || pmem[id].allocated)
			return -1;
		pmem[id].allocated = 1;
		return len;
	}

	if (order > PMEM_MAX_ORDER)
		return -1;
	DLOG("order %lx\n", order);

	/* look through the bitmap:
	 * if you find a free slot of the correct order use it
	 * otherwise, use the best fit (smallest with size > order) slot
	 */
	while (curr < end) {
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* set the not free bit and clear others */
				best_fit = curr;
				break;
			}
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
				best_fit = curr;
		}
		curr = PMEM_NEXT_INDEX(id, curr);
	}

	/* if best_fit < 0, there are no suitable slots,
	 * return an error
	 */
	if (best_fit < 0) {
		printk("pmem: no space left to allocate!\n");
		return -1;
	}

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	}
	pmem[id].bitmap[best_fit].allocated = 1;
	return best_fit;
}
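
/* Illustrative split (an addition, not part of the original source): with
 * one free slot of order 4 at index 0 and a request of order 2, the
 * partition loop runs twice: order 4 -> 3 creates a buddy at
 * 0 ^ (1 << 3) = 8, then order 3 -> 2 creates a buddy at 0 ^ (1 << 2) = 4,
 * leaving index 0 as the order-2 slot that is marked allocated. */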
static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
{
	int id = get_id(file);
#ifdef pgprot_noncached
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		return pgprot_noncached(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif
	return vma_prot;
}
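
/* Usage sketch (an addition; the device path and size are placeholders):
 * a client that wants an uncached view of an otherwise cached pmem region
 * opens the device with O_SYNC, as the comment on pmem_info.cached
 * describes:
 *
 *	int fd = open("/dev/pmem", O_RDWR | O_SYNC);
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 */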
static unsigned long pmem_start_addr(int id, struct pmem_data *data)
{
	if (pmem[id].no_allocator)
		return PMEM_START_ADDR(id, 0);
	else
		return PMEM_START_ADDR(id, data->index);
}
static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
}
static unsigned long pmem_len(int id, struct pmem_data *data)
{
	if (pmem[id].no_allocator)
		return data->index;
	else
		return PMEM_LEN(id, data->index);
}
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP | VM_PFNMAP |
			 VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset +
				  (i * PAGE_SIZE), pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}
static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	int garbage_pages;
	DLOG("unmap offset %lx len %lx\n", offset, len);

	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));

	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
	return 0;
}
static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	if (io_remap_pfn_range(vma, vma->vm_start + offset,
			       (pmem_start_addr(id, data) + offset) >>
					PAGE_SHIFT,
			       len, vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	/* hold the mm sem for the vma you are modifying when you call this */
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}
static void pmem_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	int id = get_id(file);
	/* this should never be called as we don't support copying pmem
	 * ranges via fork */
	BUG_ON(!has_allocation(file));
	down_write(&data->sem);
	/* remap the garbage pages, forkers don't get access to the data */
	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
	up_write(&data->sem);
}
static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vma backing an allocation that doesn't "
		       "exist!\n");
		return;
	}
	down_write(&data->sem);
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}
static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pmem_data *data;
	int index;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);

	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
		       " and a multiple of page size.\n");
		return -EINVAL;
	}

	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped, for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);
		ret = -EINVAL;
		goto error;
	}
	/* if file->private_data == unallocated, alloc */
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);
		data->index = index;
	}
	/* either no space was available or an error occurred */
	if (!has_allocation(file)) {
		ret = -EINVAL;
		printk("pmem: could not find allocation for map.\n");
		goto error;
	}

	if (pmem_len(id, data) < vma_size) {
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match "
		       "size of backing region [%lu].\n", vma_size,
		       pmem_len(id, data));
		ret = -EINVAL;
		goto error;
	}

	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			DLOG("remapping file: %p %lx %lx\n", file,
			     region_node->region.offset,
			     region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
				ret = -EAGAIN;
				goto error;
			}
		}
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->vma = vma;
		data->pid = current->pid;
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
		     current->pid);
	} else {
		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		data->flags |= PMEM_FLAGS_MASTERMAP;
		data->pid = current->pid;
	}
	vma->vm_ops = &vm_ops;
error:
	up_write(&data->sem);
	return ret;
}
/* the following are the api for accessing pmem regions by other drivers
 * from inside the kernel */
int get_pmem_user_addr(struct file *file, unsigned long *start,
		       unsigned long *len)
{
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file)) {
		printk(KERN_INFO "pmem: requested pmem data from invalid "
		       "file.\n");
		return -1;
	}
	data = (struct pmem_data *)file->private_data;
	down_read(&data->sem);
	if (data->vma) {
		*start = data->vma->vm_start;
		*len = data->vma->vm_end - data->vma->vm_start;
	} else {
		*start = 0;
		*len = 0;
	}
	up_read(&data->sem);
	return 0;
}
int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file) || !has_allocation(file))
		return -1;
	data = (struct pmem_data *)file->private_data;
	if (data->index == -1) {
		printk(KERN_INFO "pmem: requested pmem data from file with no "
		       "allocation.\n");
		return -1;
	}
	id = get_id(file);

	down_read(&data->sem);
	*start = pmem_start_addr(id, data);
	*len = pmem_len(id, data);
	*vstart = (unsigned long)pmem_start_vaddr(id, data);
	up_read(&data->sem);

	down_write(&data->sem);
	data->ref++;
	up_write(&data->sem);

	return 0;
}
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
{
	struct file *file;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		printk(KERN_INFO "pmem: requested data from file descriptor "
		       "that doesn't exist.");
		return -1;
	}

	if (get_pmem_addr(file, start, vstart, len))
		goto end;

	if (filp)
		*filp = file;
	return 0;
end:
	fput(file);
	return -1;
}
void put_pmem_file(struct file *file)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file))
		return;
	id = get_id(file);
	data = (struct pmem_data *)file->private_data;

	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
		BUG();
	}
	data->ref--;
	up_write(&data->sem);

	fput(file);
}
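
/* Usage sketch (an addition, not from the original source): a driver that
 * consumes a client's pmem buffer brackets the hardware operation with
 * get_pmem_file/put_pmem_file so the file can't go away underneath it.
 * "client_fd" is a hypothetical fd handed in from userspace:
 *
 *	unsigned long paddr, vaddr, len;
 *	struct file *pmem_filp;
 *
 *	if (get_pmem_file(client_fd, &paddr, &vaddr, &len, &pmem_filp))
 *		return -EINVAL;
 *	// ... program the hardware with paddr/len ...
 *	put_pmem_file(pmem_filp);
 */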
void flush_pmem_file(struct file *file, unsigned long offset,
		     unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;

	if (!is_pmem_file(file) || !has_allocation(file))
		return;

	id = get_id(file);
	data = (struct pmem_data *)file->private_data;
	if (!pmem[id].cached || file->f_flags & O_SYNC)
		return;

	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submmapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
		goto end;
	}
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
					region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
			break;
		}
	}
end:
	up_read(&data->sem);
}
static int pmem_connect(unsigned long connect, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;

	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
	if (!src_file) {
		printk("pmem: src file not found!\n");
		ret = -EINVAL;
		goto err_no_file;
	}
	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
		       "alloc!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	src_data = (struct pmem_data *)src_file->private_data;

	if (has_allocation(file) && (data->index != src_data->index)) {
		printk("pmem: file is already mapped but doesn't match this "
		       "src_file!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	data->master_fd = connect;
	data->master_file = src_file;

err_bad_file:
	fput_light(src_file, put_needed);
err_no_file:
	up_write(&data->sem);
	return ret;
}
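
/* Usage sketch (an addition; the ordering is inferred from the code above,
 * and the fds and region values are placeholders): a client fd attaches to
 * a master's allocation, mmaps it (initially backed by garbage pages), and
 * the master's owner then maps a sub-region in with PMEM_MAP:
 *
 *	struct pmem_region region = { .offset = 0x1000, .len = 0x1000 };
 *
 *	ioctl(client_fd, PMEM_CONNECT, master_fd);
 *	mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, client_fd, 0);
 *	ioctl(client_fd, PMEM_MAP, &region);
 */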
static void pmem_unlock_data_and_mm(struct pmem_data *data,
				    struct mm_struct *mm)
{
	up_write(&data->sem);
	if (mm != NULL) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}
static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
				 struct mm_struct **locked_mm)
{
	int ret = 0;
	struct mm_struct *mm = NULL;
	*locked_mm = NULL;
lock_mm:
	down_read(&data->sem);
	if (PMEM_IS_SUBMAP(data)) {
		mm = get_task_mm(data->task);
		if (!mm) {
			printk("pmem: can't remap, task is gone!\n");
			up_read(&data->sem);
			return -1;
		}
	}
	up_read(&data->sem);

	if (mm)
		down_write(&mm->mmap_sem);

	down_write(&data->sem);
	/* check that the file didn't get mmaped before we could take the
	 * data sem, this should be safe b/c you can only submap each file
	 * once */
	if (PMEM_IS_SUBMAP(data) && !mm) {
		pmem_unlock_data_and_mm(data, mm);
		goto lock_mm;
	}
	/* now check that vma.mm is still there, it could have been
	 * deleted by vma_close before we could get the data->sem */
	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
		/* might as well release this */
		if (data->flags & PMEM_FLAGS_SUBMAP) {
			put_task_struct(data->task);
			data->task = NULL;
			/* lower the submap flag to show the mm is gone */
			data->flags &= ~(PMEM_FLAGS_SUBMAP);
		}
		pmem_unlock_data_and_mm(data, mm);
		goto lock_mm;
	}
	*locked_mm = mm;
	return ret;
}
int pmem_remap(struct pmem_region *region, struct file *file,
	       unsigned operation)
{
	int ret;
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data = (struct pmem_data *)file->private_data;

	/* pmem region must be aligned on a page boundary */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		     !PMEM_IS_PAGE_ALIGNED(region->len))) {
		printk("pmem: request for unaligned pmem suballocation "
		       "%lx %lx\n", region->offset, region->len);
		return -EINVAL;
	}

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)
		return 0;

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);
	if (ret)
		return 0;

	/* only the owner of the master file can remap the client fds
	 * that back in it */
	if (!is_master_owner(file)) {
		printk("pmem: remap requested from non-master process\n");
		ret = -EINVAL;
		goto err;
	}

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem_len(id, data)) ||
		     (region->len > pmem_len(id, data)) ||
		     (region->offset + region->len > pmem_len(id, data)))) {
		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
		ret = -EINVAL;
		goto err;
	}

	if (operation == PMEM_MAP) {
		region_node = kmalloc(sizeof(struct pmem_region_node),
				      GFP_KERNEL);
		if (!region_node) {
			ret = -ENOMEM;
			printk(KERN_INFO "No space to allocate metadata!");
			goto err;
		}
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		int found = 0;
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			if (region->len == 0 ||
			    (region_node->region.offset == region->offset &&
			     region_node->region.len == region->len)) {
				list_del(elt);
				kfree(region_node);
				found = 1;
			}
		}
		if (!found) {
			printk("pmem: Unmap region does not map any mapped "
			       "region!");
			ret = -EINVAL;
			goto err;
		}
	}

	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
						   region->offset,
						   region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
						   region->offset,
						   region->len);
	}

err:
	pmem_unlock_data_and_mm(data, mm);
	return ret;
}
static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything */
	/* delete the regions and region list, nothing is mapped any more */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		pmem_unmap_pfn_range(id, data->vma, data,
				     region_node->region.offset,
				     region_node->region.len);
		list_del(elt);
		kfree(region_node);
	}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}
static void pmem_get_size(struct pmem_region *region, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	int id = get_id(file);

	if (!has_allocation(file)) {
		region->offset = 0;
		region->len = 0;
	} else {
		region->offset = pmem_start_addr(id, data);
		region->len = pmem_len(id, data);
	}
	DLOG("offset %lx len %lx\n", region->offset, region->len);
}
static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pmem_data *data;
	int id = get_id(file);

	switch (cmd) {
	case PMEM_GET_PHYS:
	{
		struct pmem_region region;
		if (!has_allocation(file)) {
			region.offset = 0;
			region.len = 0;
		} else {
			data = (struct pmem_data *)file->private_data;
			region.offset = pmem_start_addr(id, data);
			region.len = pmem_len(id, data);
		}
		printk(KERN_INFO "pmem: request for physical address of pmem "
		       "region from process %d.\n", current->pid);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_MAP:
	{
		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_MAP);
	}
	case PMEM_UNMAP:
	{
		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_UNMAP);
	}
	case PMEM_GET_SIZE:
	{
		struct pmem_region region;
		pmem_get_size(&region, file);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_TOTAL_SIZE:
	{
		struct pmem_region region;
		DLOG("get total size\n");
		region.offset = 0;
		region.len = pmem[id].size;
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_ALLOCATE:
	{
		if (has_allocation(file))
			return -EINVAL;
		data = (struct pmem_data *)file->private_data;
		data->index = pmem_allocate(id, arg);
		break;
	}
	case PMEM_CONNECT:
		return pmem_connect(arg, file);
	case PMEM_CACHE_FLUSH:
	{
		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
			return -EFAULT;
		flush_pmem_file(file, region.offset, region.len);
		break;
	}
	default:
		if (pmem[id].ioctl)
			return pmem[id].ioctl(file, cmd, arg);
		return -EINVAL;
	}
	return 0;
}
static ssize_t debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct list_head *elt, *elt2;
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	int id = (int)file->private_data;
	const int debug_bufmax = 4096;
	static char buffer[4096];
	int n = 0;

	DLOG("debug open\n");
	n = scnprintf(buffer, debug_bufmax,
		      "pid #: mapped regions (offset, len) (offset,len)...\n");

	mutex_lock(&pmem[id].data_list_lock);
	list_for_each(elt, &pmem[id].data_list) {
		data = list_entry(elt, struct pmem_data, list);
		down_read(&data->sem);
		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
			       data->pid);
		list_for_each(elt2, &data->region_list) {
			region_node = list_entry(elt2, struct pmem_region_node,
						 list);
			n += scnprintf(buffer + n, debug_bufmax - n,
				       "(%lx,%lx) ",
				       region_node->region.offset,
				       region_node->region.len);
		}
		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
		up_read(&data->sem);
	}
	mutex_unlock(&pmem[id].data_list_lock);

	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
static struct file_operations debug_fops = {
	.read = debug_read,
	.open = debug_open,
};

static struct miscdevice pmem_dev = {
	.name = "pmem",
	.fops = &pmem_fops,
};
int pmem_setup(struct android_pmem_platform_data *pdata,
	       long (*ioctl)(struct file *, unsigned int, unsigned long),
	       int (*release)(struct inode *, struct file *))
{
	int err = 0;
	int i, index = 0;
	int id = id_count;
	id_count++;

	pmem[id].no_allocator = pdata->no_allocator;
	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].base = pdata->start;
	pmem[id].size = pdata->size;
	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	init_rwsem(&pmem[id].bitmap_sem);
	mutex_init(&pmem[id].data_list_lock);
	INIT_LIST_HEAD(&pmem[id].data_list);
	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);

	err = misc_register(&pmem[id].dev);
	if (err) {
		printk(KERN_ALERT "Unable to register pmem driver!\n");
		goto err_cant_register_device;
	}
	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;

	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
				  sizeof(struct pmem_bits), GFP_KERNEL);
	if (!pmem[id].bitmap)
		goto err_no_mem_for_metadata;

	memset(pmem[id].bitmap, 0,
	       sizeof(struct pmem_bits) * pmem[id].num_entries);

	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
		if ((pmem[id].num_entries) & 1 << i) {
			PMEM_ORDER(id, index) = i;
			index = PMEM_NEXT_INDEX(id, index);
		}
	}
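
	/* Worked example (an addition, for illustration): for a region of
	 * num_entries = 48 pages (binary 110000), the loop above walks the
	 * set bits from high to low and carves the space into power-of-two
	 * runs: index 0 gets order 5 (32 pages) and index 32 gets order 4
	 * (16 pages); later allocations split these runs into buddies. */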
	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
						      pmem[id].size);
#endif
	else
		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);

	if (pmem[id].vbase == 0)
		goto error_cant_remap;

	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
	if (pmem[id].no_allocator)
		pmem[id].allocated = 0;

	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
			    &debug_fops);
	return 0;

error_cant_remap:
	kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
	misc_deregister(&pmem[id].dev);
err_cant_register_device:
	return -1;
}
static int pmem_probe(struct platform_device *pdev)
{
	struct android_pmem_platform_data *pdata;

	if (!pdev || !pdev->dev.platform_data) {
		printk(KERN_ALERT "Unable to probe pmem!\n");
		return -1;
	}
	pdata = pdev->dev.platform_data;
	return pmem_setup(pdata, NULL, NULL);
}
static int pmem_remove(struct platform_device *pdev)
{
	int id = pdev->id;
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	misc_deregister(&pmem[id].dev);
	return 0;
}
static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
};

static int __init pmem_init(void)
{
	return platform_driver_register(&pmem_driver);
}

static void __exit pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
}

module_init(pmem_init);
module_exit(pmem_exit);