/* drivers/staging/android/ion/mtk/ion_mm_heap_old.c (android_kernel_alcatel_ttab, mt8127) */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/xlog.h>
#include <mach/m4u.h>
#include <linux/ion.h>
#include "ion_priv.h"
#include <linux/ion_drv.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/mmprofile.h>

#define ION_FUNC_ENTER //MMProfileLogMetaString(MMP_ION_DEBUG, MMProfileFlagStart, __func__);
#define ION_FUNC_LEAVE //MMProfileLogMetaString(MMP_ION_DEBUG, MMProfileFlagEnd, __func__);

extern struct ion_heap *g_ion_heaps[ION_HEAP_IDX_MAX];

typedef struct
{
    int eModuleID;          /* owning M4U module ID, -1 until ION_MM_CONFIG_BUFFER */
    unsigned int security;  /* security flag passed to m4u_alloc_mva() */
    unsigned int coherent;  /* coherency flag passed to m4u_alloc_mva() */
    void* pVA;              /* kernel virtual address of the vmalloc backing */
    unsigned int MVA;       /* M4U virtual address, 0 until mapped */
} ion_mm_buffer_info;

static DEFINE_MUTEX(ion_mm_buffer_info_mutex);

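/*
 * Allocate a multimedia-heap buffer: the backing memory comes from
 * vmalloc_user(), and an sg_table with one entry per backing page is
 * built so the buffer can later be handed out for DMA mapping. The M4U
 * MVA is not allocated here; it is set up lazily in ion_mm_heap_phys()
 * once the buffer has been configured with a module ID.
 */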
static int ion_mm_heap_allocate(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long size, unsigned long align,
                                unsigned long flags)
{
    ion_mm_buffer_info* pBufferInfo = NULL;
    int ret;
    unsigned int addr;
    struct sg_table *table;
    struct scatterlist *sg;
    void* pVA;
    ION_FUNC_ENTER;
    pVA = vmalloc_user(size);
    buffer->priv_virt = NULL;
    if (IS_ERR_OR_NULL(pVA))
    {
        printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    pBufferInfo = (ion_mm_buffer_info*) kzalloc(sizeof(ion_mm_buffer_info), GFP_KERNEL);
    if (IS_ERR_OR_NULL(pBufferInfo))
    {
        vfree(pVA);
        printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!table)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
    if (ret)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        kfree(table);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    sg = table->sgl;
    for (addr = (unsigned int)pVA; addr < (unsigned int)pVA + size; addr += PAGE_SIZE)
    {
        struct page *page = vmalloc_to_page((void*)addr);
        sg_set_page(sg, page, PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    buffer->sg_table = table;

    pBufferInfo->pVA = pVA;
    pBufferInfo->eModuleID = -1;
    buffer->priv_virt = pBufferInfo;
    ION_FUNC_LEAVE;
    return 0;
}

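/*
 * Release a buffer: tear down the M4U mapping if one was created, then
 * free the vmalloc backing, the private buffer info and the sg_table.
 */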
static void ion_mm_heap_free(struct ion_buffer *buffer)
{
    ion_mm_buffer_info* pBufferInfo = (ion_mm_buffer_info*) buffer->priv_virt;
    ION_FUNC_ENTER;
    mutex_lock(&ion_mm_buffer_info_mutex);
    if (pBufferInfo)
    {
        if ((pBufferInfo->eModuleID != -1) && (pBufferInfo->MVA))
            m4u_dealloc_mva(pBufferInfo->eModuleID, (unsigned int)pBufferInfo->pVA, buffer->size, pBufferInfo->MVA);
        if (pBufferInfo->pVA)
            vfree(pBufferInfo->pVA);
        kfree(pBufferInfo);
        if (buffer->sg_table)
            sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
    }
    mutex_unlock(&ion_mm_buffer_info_mutex);
    ION_FUNC_LEAVE;
}

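/*
 * Kernel mapping simply returns the vmalloc address saved at allocation
 * time; the DMA map/unmap hooks hand back the prebuilt sg_table and
 * need no extra work.
 */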
static void *ion_mm_heap_map_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
    ion_mm_buffer_info* pBufferInfo = (ion_mm_buffer_info*) buffer->priv_virt;
    void* pVA = NULL;
    ION_FUNC_ENTER;
    if (pBufferInfo)
        pVA = pBufferInfo->pVA;
    ION_FUNC_LEAVE;
    return pVA;
}

static void ion_mm_heap_unmap_kernel(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
    ION_FUNC_ENTER;
    ION_FUNC_LEAVE;
}

static struct sg_table* ion_mm_heap_map_dma(struct ion_heap *heap, struct ion_buffer *buffer)
{
    ION_FUNC_ENTER;
    ION_FUNC_LEAVE;
    return buffer->sg_table;
}

static void ion_mm_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer)
{
    ION_FUNC_ENTER;
    ION_FUNC_LEAVE;
}

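/*
 * Map the buffer into user space by remapping the vmalloc area that
 * backs it into the caller's VMA.
 */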
static int ion_mm_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                                struct vm_area_struct *vma)
{
    int ret;
    ion_mm_buffer_info* pBufferInfo = (ion_mm_buffer_info*) buffer->priv_virt;
    ION_FUNC_ENTER;
    if ((!pBufferInfo) || (!pBufferInfo->pVA))
    {
        printk("[ion_mm_heap_map_user]: Error. Invalid buffer.\n");
        ION_FUNC_LEAVE;
        return -EFAULT;
    }
    ret = remap_vmalloc_range(vma, pBufferInfo->pVA, vma->vm_pgoff);
    ION_FUNC_LEAVE;
    return ret;
}

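/*
 * "Physical" address for this heap means the M4U MVA. The MVA is
 * allocated on first use, under ion_mm_buffer_info_mutex, and only
 * after the buffer has been configured via ION_MM_CONFIG_BUFFER.
 */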
static int ion_mm_heap_phys(struct ion_heap *heap,
                            struct ion_buffer *buffer,
                            ion_phys_addr_t *addr, size_t *len)
{
    ion_mm_buffer_info* pBufferInfo = (ion_mm_buffer_info*) buffer->priv_virt;
    ION_FUNC_ENTER;
    if (!pBufferInfo)
    {
        printk("[ion_mm_heap_phys]: Error. Invalid buffer.\n");
        ION_FUNC_LEAVE;
        return -EFAULT; // Invalid buffer
    }
    if (pBufferInfo->eModuleID == -1)
    {
        printk("[ion_mm_heap_phys]: Error. Buffer not configured.\n");
        ION_FUNC_LEAVE;
        return -EFAULT; // Buffer not configured.
    }
    // Allocate MVA on first use.
    mutex_lock(&ion_mm_buffer_info_mutex);
    if (pBufferInfo->MVA == 0)
    {
        int ret = m4u_alloc_mva(pBufferInfo->eModuleID, (unsigned int)pBufferInfo->pVA, buffer->size, pBufferInfo->security, pBufferInfo->coherent, &pBufferInfo->MVA);
        if (ret < 0)
        {
            pBufferInfo->MVA = 0;
            mutex_unlock(&ion_mm_buffer_info_mutex);
            printk("[ion_mm_heap_phys]: Error. Allocate MVA failed.\n");
            ION_FUNC_LEAVE;
            return -EFAULT;
        }
    }
    mutex_unlock(&ion_mm_buffer_info_mutex);
    *addr = (ion_phys_addr_t) pBufferInfo->MVA; // MVA address
    *len = buffer->size;
    ION_FUNC_LEAVE;
    return 0;
}

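/*
 * Heap-specific ioctl. ION_MM_CONFIG_BUFFER records the module ID and
 * the security/coherent flags in the buffer's private info; this must
 * happen before the MVA is allocated, otherwise the request is
 * rejected with ION_ERROR_CONFIG_LOCKED.
 */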
long ion_mm_ioctl(struct ion_client *client, unsigned int cmd, unsigned long arg, int from_kernel)
{
    ion_mm_data_t Param;
    long ret = 0;
    char dbgstr[256];
    ION_FUNC_ENTER;
    if (from_kernel)
        Param = *(ion_mm_data_t*) arg;
    else if (copy_from_user(&Param, (void __user *)arg, sizeof(ion_mm_data_t)))
    {
        ION_FUNC_LEAVE;
        return -EFAULT;
    }
    switch (Param.mm_cmd)
    {
    case ION_MM_CONFIG_BUFFER:
    {
        struct ion_buffer* buffer;
        if (Param.config_buffer_param.handle)
        {
            buffer = ion_handle_buffer(Param.config_buffer_param.handle);
            if (buffer->heap == g_ion_heaps[ION_HEAP_IDX_MULTIMEDIA])
            {
                ion_mm_buffer_info* pBufferInfo = buffer->priv_virt;
                mutex_lock(&ion_mm_buffer_info_mutex);
                if (pBufferInfo->MVA == 0)
                {
                    pBufferInfo->eModuleID = Param.config_buffer_param.eModuleID;
                    pBufferInfo->security = Param.config_buffer_param.security;
                    pBufferInfo->coherent = Param.config_buffer_param.coherent;
                }
                else
                {
                    //printk("[ion_mm_heap]: Warning. Cannot config buffer after GET_PHYS is called.\n");
                    ret = -ION_ERROR_CONFIG_LOCKED;
                }
                mutex_unlock(&ion_mm_buffer_info_mutex);
            }
            else
            {
                printk("[ion_mm_heap]: Error. Cannot configure buffer that is not from multimedia heap.\n");
                ret = -EFAULT;
            }
        }
        else
        {
            printk("[ion_mm_heap]: Error. Configure buffer with invalid handle.\n");
            ret = -EFAULT;
        }
        sprintf(dbgstr, "ION_MM_CONFIG_BUFFER:handle=0x%08X, eModuleID=%d, security=%d, coherent=%d", (unsigned int)Param.config_buffer_param.handle, Param.config_buffer_param.eModuleID, Param.config_buffer_param.security, Param.config_buffer_param.coherent);
    }
    break;
    default:
        printk("[ion_mm_heap]: Error. Invalid command.\n");
        ret = -EFAULT;
    }
    if (from_kernel)
        *(ion_mm_data_t*)arg = Param;
    else if (copy_to_user((void __user *)arg, &Param, sizeof(ion_mm_data_t)))
        ret = -EFAULT;
    ION_FUNC_LEAVE;
    return ret;
}

struct ion_heap_ops mm_heap_ops = {
    .allocate = ion_mm_heap_allocate,
    .free = ion_mm_heap_free,
    .map_kernel = ion_mm_heap_map_kernel,
    .unmap_kernel = ion_mm_heap_unmap_kernel,
    .map_dma = ion_mm_heap_map_dma,
    .unmap_dma = ion_mm_heap_unmap_dma,
    .map_user = ion_mm_heap_map_user,
    .phys = ion_mm_heap_phys,
};

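/* Constructor and destructor for the multimedia heap. */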
struct ion_heap *ion_mm_heap_create(struct ion_platform_heap *unused)
{
    struct ion_heap *heap;
    heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
    if (!heap)
        return ERR_PTR(-ENOMEM);
    heap->ops = &mm_heap_ops;
    heap->type = ION_HEAP_TYPE_MULTIMEDIA;
    return heap;
}

void ion_mm_heap_destroy(struct ion_heap *heap)
{
    kfree(heap);
}