drivers/misc/mediatek/gpu/mt8127/mali/mali/linux/mali_memory_dma_buf.c
/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2012-2013 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */

#include <linux/fs.h>      /* file system operations */
#include <asm/uaccess.h>   /* user space access */
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include "mali_ukk.h"
#include "mali_osk.h"
#include "mali_kernel_common.h"
#include "mali_session.h"
#include "mali_kernel_linux.h"

#include "mali_memory.h"
#include "mali_memory_dma_buf.h"

#include "mali_pp_job.h"

static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem);

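/*
 * Book-keeping for one dma-buf attached to a Mali session.
 *
 * \a map_ref counts how many times the buffer is currently mapped into the
 * session's Mali address space, \a map_lock protects the map/unmap state and
 * \a wait_queue lets mali_dma_buf_release() wait for the buffer to become
 * unmapped before detaching it.
 */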
struct mali_dma_buf_attachment {
	struct dma_buf *buf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct mali_session_data *session;
	int map_ref;
	struct mutex map_lock;
	mali_bool is_mapped;
	wait_queue_head_t wait_queue;
};

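/*
 * Detach \a mem from its dma-buf and free the tracking structure.
 * Waits until the buffer is no longer mapped on Mali before detaching.
 */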
static void mali_dma_buf_release(struct mali_dma_buf_attachment *mem)
{
	MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem));

	MALI_DEBUG_ASSERT_POINTER(mem);
	MALI_DEBUG_ASSERT_POINTER(mem->attachment);
	MALI_DEBUG_ASSERT_POINTER(mem->buf);

#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
	/* We mapped implicitly on attach, so we need to unmap on release */
	mali_dma_buf_unmap(mem);
#endif

	/* Wait for buffer to become unmapped */
	wait_event(mem->wait_queue, !mem->is_mapped);
	MALI_DEBUG_ASSERT(!mem->is_mapped);

	dma_buf_detach(mem->buf, mem->attachment);
	dma_buf_put(mem->buf);

	_mali_osk_free(mem);
}

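/* Release the dma-buf attachment backing \a descriptor. */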
void mali_mem_dma_buf_release(mali_mem_allocation *descriptor)
{
	struct mali_dma_buf_attachment *mem = descriptor->dma_buf.attachment;

	mali_dma_buf_release(mem);
}

/*
 * Map DMA buf attachment \a mem into \a session at virtual address \a virt.
 */
static int mali_dma_buf_map(struct mali_dma_buf_attachment *mem, struct mali_session_data *session, u32 virt, u32 flags)
{
	struct mali_page_directory *pagedir;
	struct scatterlist *sg;
	int i;

	MALI_DEBUG_ASSERT_POINTER(mem);
	MALI_DEBUG_ASSERT_POINTER(session);
	MALI_DEBUG_ASSERT(mem->session == session);

	mutex_lock(&mem->map_lock);

	mem->map_ref++;

	MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref));

	if (1 == mem->map_ref) {
		/* First reference taken, so we need to map the dma buf */
		MALI_DEBUG_ASSERT(!mem->is_mapped);

		pagedir = mali_session_get_page_directory(session);
		MALI_DEBUG_ASSERT_POINTER(pagedir);

		mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(mem->sgt)) {
			MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n"));
			/* Drop the reference taken above and release the lock before
			 * failing, so map_lock and map_ref are not left dangling. */
			mem->map_ref--;
			mutex_unlock(&mem->map_lock);
			return -EFAULT;
		}

		for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) {
			u32 size = sg_dma_len(sg);
			dma_addr_t phys = sg_dma_address(sg);

			/* sg must be page aligned. */
			MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);

			mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);

			virt += size;
		}

		if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) {
			u32 guard_phys;
			MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n"));

			guard_phys = sg_dma_address(mem->sgt->sgl);
			mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT);
		}

		mem->is_mapped = MALI_TRUE;
		mutex_unlock(&mem->map_lock);

		/* Wake up any thread waiting for buffer to become mapped */
		wake_up_all(&mem->wait_queue);
	} else {
		MALI_DEBUG_ASSERT(mem->is_mapped);
		mutex_unlock(&mem->map_lock);
	}

	return 0;
}

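/*
 * Drop one map reference on \a mem and, when the last reference goes away,
 * unmap the attachment and mark the buffer as unmapped.
 */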
static void mali_dma_buf_unmap(struct mali_dma_buf_attachment *mem)
{
	MALI_DEBUG_ASSERT_POINTER(mem);
	MALI_DEBUG_ASSERT_POINTER(mem->attachment);
	MALI_DEBUG_ASSERT_POINTER(mem->buf);

	mutex_lock(&mem->map_lock);

	mem->map_ref--;

	MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref));

	if (0 == mem->map_ref) {
		dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL);

		mem->is_mapped = MALI_FALSE;
	}

	mutex_unlock(&mem->map_lock);

	/* Wake up any thread waiting for buffer to become unmapped */
	wake_up_all(&mem->wait_queue);
}

#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
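/*
 * Map every dma-buf referenced by the memory cookies of \a job into the
 * job's session, and remember each mapped attachment in job->dma_bufs so
 * that mali_dma_buf_unmap_job() can undo the mappings afterwards.
 */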
int mali_dma_buf_map_job(struct mali_pp_job *job)
{
	mali_mem_allocation *descriptor;
	struct mali_dma_buf_attachment *mem;
	_mali_osk_errcode_t err;
	int i;
	int ret = 0;

	_mali_osk_mutex_wait(job->session->memory_lock);

	for (i = 0; i < job->num_memory_cookies; i++) {
		int cookie = job->memory_cookies[i];

		if (0 == cookie) {
			/* 0 is not a valid cookie */
			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
			continue;
		}

		MALI_DEBUG_ASSERT(0 < cookie);

		err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
		                                  cookie, (void **)&descriptor);

		if (_MALI_OSK_ERR_OK != err) {
			MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
			ret = -EFAULT;
			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
			continue;
		}

		if (MALI_MEM_DMA_BUF != descriptor->type) {
			/* Not a DMA-buf */
			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
			continue;
		}

		mem = descriptor->dma_buf.attachment;

		MALI_DEBUG_ASSERT_POINTER(mem);
		MALI_DEBUG_ASSERT(mem->session == job->session);

		err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
		if (0 != err) {
			MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\n",
			                        cookie, descriptor->mali_mapping.addr));
			ret = -EFAULT;
			MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
			continue;
		}

		/* Add mem to list of DMA-bufs mapped for this job */
		job->dma_bufs[i] = mem;
	}

	_mali_osk_mutex_signal(job->session->memory_lock);

	return ret;
}

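/* Unmap all dma-bufs that mali_dma_buf_map_job() mapped for \a job. */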
void mali_dma_buf_unmap_job(struct mali_pp_job *job)
{
	int i;
	for (i = 0; i < job->num_dma_bufs; i++) {
		if (NULL == job->dma_bufs[i]) continue;

		mali_dma_buf_unmap(job->dma_bufs[i]);
		job->dma_bufs[i] = NULL;
	}
}
#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */

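/*
 * Attach the dma-buf given by user space to \a session and set up a Mali
 * memory descriptor for it at the requested Mali virtual address.
 * The resulting descriptor cookie is written back to user space.
 */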
int mali_attach_dma_buf(struct mali_session_data *session, _mali_uk_attach_dma_buf_s __user *user_arg)
{
	struct dma_buf *buf;
	struct mali_dma_buf_attachment *mem;
	_mali_uk_attach_dma_buf_s args;
	mali_mem_allocation *descriptor;
	int md;
	int fd;

	/* Get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_attach_dma_buf_s))) {
		return -EFAULT;
	}

	if (args.mali_address & ~PAGE_MASK) {
		MALI_DEBUG_PRINT_ERROR(("Requested address (0x%08x) is not page aligned\n", args.mali_address));
		return -EINVAL;
	}

	if (args.mali_address >= args.mali_address + args.size) {
		MALI_DEBUG_PRINT_ERROR(("Requested address and size (0x%08x + 0x%08x) is too big\n", args.mali_address, args.size));
		return -EINVAL;
	}

	fd = args.mem_fd;

	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
		return PTR_RET(buf);
	}

	/* Currently, only mapping of the full buffer is supported. */
	if (args.size != buf->size) {
		MALI_DEBUG_PRINT_ERROR(("dma-buf size doesn't match mapping size.\n"));
		dma_buf_put(buf);
		return -EINVAL;
	}

	mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment));
	if (NULL == mem) {
		MALI_DEBUG_PRINT_ERROR(("Failed to allocate dma-buf tracing struct\n"));
		dma_buf_put(buf);
		return -ENOMEM;
	}

	mem->buf = buf;
	mem->session = session;
	mem->map_ref = 0;
	mutex_init(&mem->map_lock);
	init_waitqueue_head(&mem->wait_queue);

	mem->attachment = dma_buf_attach(mem->buf, &mali_platform_device->dev);
	/* dma_buf_attach() returns an ERR_PTR on failure, not NULL */
	if (IS_ERR_OR_NULL(mem->attachment)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to attach to dma-buf %d\n", fd));
		dma_buf_put(mem->buf);
		_mali_osk_free(mem);
		return -EFAULT;
	}

	/* Set up Mali memory descriptor */
	descriptor = mali_mem_descriptor_create(session, MALI_MEM_DMA_BUF);
	if (NULL == descriptor) {
		MALI_DEBUG_PRINT_ERROR(("Failed to allocate descriptor dma-buf %d\n", fd));
		mali_dma_buf_release(mem);
		return -ENOMEM;
	}

	descriptor->size = args.size;
	descriptor->mali_mapping.addr = args.mali_address;

	descriptor->dma_buf.attachment = mem;

	descriptor->flags |= MALI_MEM_FLAG_DONT_CPU_MAP;
	if (args.flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) {
		/* OR in the guard page flag so MALI_MEM_FLAG_DONT_CPU_MAP set above is kept */
		descriptor->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE;
	}

	_mali_osk_mutex_wait(session->memory_lock);

	/* Map dma-buf into this session's page tables */
	if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
		_mali_osk_mutex_signal(session->memory_lock);
		MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
		mali_mem_descriptor_destroy(descriptor);
		mali_dma_buf_release(mem);
		return -ENOMEM;
	}

#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
	/* Map memory into session's Mali virtual address space. */
	if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
		mali_mem_mali_map_free(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);

		MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
		mali_mem_descriptor_destroy(descriptor);
		mali_dma_buf_release(mem);
		return -ENOMEM;
	}
#endif

	_mali_osk_mutex_signal(session->memory_lock);

	/* Get descriptor mapping for memory. */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_mali_map_free(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);

		MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
		mali_mem_descriptor_destroy(descriptor);
		mali_dma_buf_release(mem);
		return -EFAULT;
	}

	/* Return stuff to user space */
	if (0 != put_user(md, &user_arg->cookie)) {
		_mali_osk_mutex_wait(session->memory_lock);
		mali_mem_mali_map_free(descriptor);
		_mali_osk_mutex_signal(session->memory_lock);

		MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
		mali_descriptor_mapping_free(session->descriptor_mapping, md);
		mali_dma_buf_release(mem);
		return -EFAULT;
	}

	return 0;
}

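/*
 * Release the dma-buf identified by the descriptor cookie passed from user
 * space: unmap it from the session, release the attachment and destroy the
 * memory descriptor.
 */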
int mali_release_dma_buf(struct mali_session_data *session, _mali_uk_release_dma_buf_s __user *user_arg)
{
	int ret = 0;
	_mali_uk_release_dma_buf_s args;
	mali_mem_allocation *descriptor;

	/* Get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_release_dma_buf_s))) {
		return -EFAULT;
	}

	MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %d\n", args.cookie));

	_mali_osk_mutex_wait(session->memory_lock);

	descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, args.cookie);

	if (NULL != descriptor) {
		MALI_DEBUG_PRINT(3, ("Mali DMA-buf: Releasing dma-buf at mali address %x\n", descriptor->mali_mapping.addr));

		mali_mem_mali_map_free(descriptor);

		mali_dma_buf_release(descriptor->dma_buf.attachment);

		mali_mem_descriptor_destroy(descriptor);
	} else {
		MALI_DEBUG_PRINT_ERROR(("Invalid memory descriptor %d used to release dma-buf\n", args.cookie));
		ret = -EINVAL;
	}

	_mali_osk_mutex_signal(session->memory_lock);

	/* Return the result of the descriptor lookup and release above */
	return ret;
}

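/*
 * Report the size of the dma-buf behind the file descriptor supplied by
 * user space, writing the result back through \a user_arg.
 */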
int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg)
{
	_mali_uk_dma_buf_get_size_s args;
	int fd;
	struct dma_buf *buf;

	/* Get call arguments from user space. copy_from_user returns the number of bytes that were NOT copied */
	if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) {
		return -EFAULT;
	}

	/* Do DMA-BUF stuff */
	fd = args.mem_fd;

	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf)) {
		MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd));
		return PTR_RET(buf);
	}

	if (0 != put_user(buf->size, &user_arg->size)) {
		dma_buf_put(buf);
		return -EFAULT;
	}

	dma_buf_put(buf);

	return 0;
}