2 #include <linux/uaccess.h>
3 #include <linux/module.h>
5 #include <linux/vmalloc.h>
6 #include <linux/slab.h>
7 #include <linux/xlog.h>
11 #include <linux/ion_drv.h>
12 #include <linux/scatterlist.h>
13 #include <linux/err.h>
14 #include <linux/mutex.h>
15 #include <linux/mmprofile.h>
17 #define ION_FUNC_ENTER //MMProfileLogMetaString(MMP_ION_DEBUG, MMProfileFlagStart, __func__);
18 #define ION_FUNC_LEAVE //MMProfileLogMetaString(MMP_ION_DEBUG, MMProfileFlagEnd, __func__);
20 extern struct ion_heap
*g_ion_heaps
[ION_HEAP_IDX_MAX
];
25 unsigned int security
;
26 unsigned int coherent
;
/* Serializes MVA setup/teardown and configuration of per-buffer
 * ion_mm_buffer_info state across allocate/free/phys/ioctl paths. */
static DEFINE_MUTEX(ion_mm_buffer_info_mutex);
33 static int ion_mm_heap_allocate(struct ion_heap
*heap
,
34 struct ion_buffer
*buffer
,
35 unsigned long size
, unsigned long align
,
38 ion_mm_buffer_info
* pBufferInfo
= NULL
;
41 struct sg_table
*table
;
42 struct scatterlist
*sg
;
45 pVA
= vmalloc_user(size
);
46 buffer
->priv_virt
= NULL
;
47 if (IS_ERR_OR_NULL(pVA
))
49 printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
53 pBufferInfo
= (ion_mm_buffer_info
*) kzalloc(sizeof(ion_mm_buffer_info
), GFP_KERNEL
);
54 if (IS_ERR_OR_NULL(pBufferInfo
))
57 printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
61 table
= kmalloc(sizeof(struct sg_table
), GFP_KERNEL
);
69 ret
= sg_alloc_table(table
, PAGE_ALIGN(size
) / PAGE_SIZE
, GFP_KERNEL
);
79 for (addr
=(unsigned int)pVA
; addr
< (unsigned int) pVA
+ size
; addr
+= PAGE_SIZE
)
81 struct page
*page
= vmalloc_to_page((void*)addr
);
82 sg_set_page(sg
, page
, PAGE_SIZE
, 0);
85 buffer
->sg_table
= table
;
87 pBufferInfo
->pVA
= pVA
;
88 pBufferInfo
->eModuleID
= -1;
89 buffer
->priv_virt
= pBufferInfo
;
94 static void ion_mm_heap_free(struct ion_buffer
*buffer
)
96 ion_mm_buffer_info
* pBufferInfo
= (ion_mm_buffer_info
*) buffer
->priv_virt
;
98 mutex_lock(&ion_mm_buffer_info_mutex
);
101 if ((pBufferInfo
->eModuleID
!= -1) && (pBufferInfo
->MVA
))
102 m4u_dealloc_mva(pBufferInfo
->eModuleID
, (unsigned int)pBufferInfo
->pVA
, buffer
->size
, pBufferInfo
->MVA
);
103 if (pBufferInfo
->pVA
)
104 vfree(pBufferInfo
->pVA
);
106 if (buffer
->sg_table
)
107 sg_free_table(buffer
->sg_table
);
108 kfree(buffer
->sg_table
);
110 mutex_unlock(&ion_mm_buffer_info_mutex
);
114 static void *ion_mm_heap_map_kernel(struct ion_heap
*heap
,
115 struct ion_buffer
*buffer
)
117 ion_mm_buffer_info
* pBufferInfo
= (ion_mm_buffer_info
*) buffer
->priv_virt
;
121 pVA
= pBufferInfo
->pVA
;
/* Intentionally empty: the vmalloc mapping returned by map_kernel lives
 * for the whole buffer lifetime, so there is nothing to undo here. */
static void ion_mm_heap_unmap_kernel(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}
133 static struct sg_table
* ion_mm_heap_map_dma(struct ion_heap
*heap
, struct ion_buffer
*buffer
)
137 return buffer
->sg_table
;
/* No-op: map_dma hands out the long-lived table, which is destroyed in
 * ion_mm_heap_free, not here. */
static void ion_mm_heap_unmap_dma(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
}
147 static int ion_mm_heap_map_user(struct ion_heap
*heap
, struct ion_buffer
*buffer
,
148 struct vm_area_struct
*vma
)
151 ion_mm_buffer_info
* pBufferInfo
= (ion_mm_buffer_info
*) buffer
->priv_virt
;
153 if ((!pBufferInfo
) || (!pBufferInfo
->pVA
))
155 printk("[ion_mm_heap_map_user]: Error. Invalid buffer.\n");
158 ret
= remap_vmalloc_range(vma
, pBufferInfo
->pVA
, vma
->vm_pgoff
);
163 static int ion_mm_heap_phys(struct ion_heap
*heap
,
164 struct ion_buffer
*buffer
,
165 ion_phys_addr_t
*addr
, size_t *len
)
167 ion_mm_buffer_info
* pBufferInfo
= (ion_mm_buffer_info
*) buffer
->priv_virt
;
171 printk("[ion_mm_heap_phys]: Error. Invalid buffer.\n");
173 return -EFAULT
; // Invalid buffer
175 if (pBufferInfo
->eModuleID
== -1)
177 printk("[ion_mm_heap_phys]: Error. Buffer not configured.\n");
179 return -EFAULT
; // Buffer not configured.
182 mutex_lock(&ion_mm_buffer_info_mutex
);
183 if (pBufferInfo
->MVA
== 0)
185 int ret
= m4u_alloc_mva(pBufferInfo
->eModuleID
, (unsigned int)pBufferInfo
->pVA
, buffer
->size
, pBufferInfo
->security
, pBufferInfo
->coherent
, &pBufferInfo
->MVA
);
188 mutex_unlock(&ion_mm_buffer_info_mutex
);
189 pBufferInfo
->MVA
= 0;
190 printk("[ion_mm_heap_phys]: Error. Allocate MVA failed.\n");
195 mutex_unlock(&ion_mm_buffer_info_mutex
);
196 *addr
= (ion_phys_addr_t
) pBufferInfo
->MVA
; // MVA address
202 long ion_mm_ioctl(struct ion_client
*client
, unsigned int cmd
, unsigned long arg
, int from_kernel
)
207 unsigned long ret_copy
;
210 Param
= *(ion_mm_data_t
*) arg
;
212 ret_copy
= copy_from_user(&Param
, (void __user
*)arg
, sizeof(ion_mm_data_t
));
213 switch (Param
.mm_cmd
)
215 case ION_MM_CONFIG_BUFFER
:
217 struct ion_buffer
* buffer
;
218 if (Param
.config_buffer_param
.handle
)
220 buffer
= ion_handle_buffer(Param
.config_buffer_param
.handle
);
221 if (buffer
->heap
== g_ion_heaps
[ION_HEAP_IDX_MULTIMEDIA
])
223 ion_mm_buffer_info
* pBufferInfo
= buffer
->priv_virt
;
224 mutex_lock(&ion_mm_buffer_info_mutex
);
225 if (pBufferInfo
->MVA
== 0)
227 pBufferInfo
->eModuleID
= Param
.config_buffer_param
.eModuleID
;
228 pBufferInfo
->security
= Param
.config_buffer_param
.security
;
229 pBufferInfo
->coherent
= Param
.config_buffer_param
.coherent
;
233 //printk("[ion_mm_heap]: Warning. Cannot config buffer after GET_PHYS is called.\n");
234 ret
= -ION_ERROR_CONFIG_LOCKED
;
236 mutex_unlock(&ion_mm_buffer_info_mutex
);
240 printk("[ion_mm_heap]: Error. Cannot configure buffer that is not from multimedia heap.\n");
246 printk("[ion_mm_heap]: Error. Configure buffer with invalid handle.\n");
249 sprintf(dbgstr
, "ION_MM_CONFIG_BUFFER:handle=0x%08X, eModuleID=%d, security=%d, coherent=%d", (unsigned int)Param
.config_buffer_param
.handle
, Param
.config_buffer_param
.eModuleID
, Param
.config_buffer_param
.security
, Param
.config_buffer_param
.coherent
);
253 printk("[ion_mm_heap]: Error. Invalid command.\n");
257 *(ion_mm_data_t
*)arg
= Param
;
259 ret_copy
= copy_to_user((void __user
*)arg
, &Param
, sizeof(ion_mm_data_t
));
264 struct ion_heap_ops mm_heap_ops
= {
265 .allocate
= ion_mm_heap_allocate
,
266 .free
= ion_mm_heap_free
,
267 .map_kernel
= ion_mm_heap_map_kernel
,
268 .unmap_kernel
= ion_mm_heap_unmap_kernel
,
269 .map_dma
= ion_mm_heap_map_dma
,
270 .unmap_dma
= ion_mm_heap_unmap_dma
,
271 .map_user
= ion_mm_heap_map_user
,
272 .phys
= ion_mm_heap_phys
,
276 struct ion_heap
*ion_mm_heap_create(struct ion_platform_heap
*unused
)
278 struct ion_heap
*heap
;
279 heap
= kzalloc(sizeof(struct ion_heap
), GFP_KERNEL
);
281 return ERR_PTR(-ENOMEM
);
282 heap
->ops
= &mm_heap_ops
;
283 heap
->type
= ION_HEAP_TYPE_MULTIMEDIA
;
287 void ion_mm_heap_destroy(struct ion_heap
*heap
)