/*
 * This confidential and proprietary software may be used only as
 * authorised by a licensing agreement from ARM Limited
 * (C) COPYRIGHT 2008-2015 ARM Limited
 * ALL RIGHTS RESERVED
 * The entire notice above must be reproduced on all authorised
 * copies and copies may only be made to the extent permitted
 * by a licensing agreement from ARM Limited.
 */

/**
 * @file ump_osk_memory.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */
/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>   /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/uaccess.h>    /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
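
/* Per-VMA bookkeeping. Linux can duplicate a VMA (e.g. when the process forks), so
 * live mappings are reference counted and the backing UMP allocation is unmapped
 * only when the last duplicate is closed; see ump_vma_open()/ump_vma_close() below. */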
typedef struct ump_vma_usage_tracker {
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;
static void ump_vma_open(struct vm_area_struct *vma);
static void ump_vma_close(struct vm_area_struct *vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
#endif
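
/* VMA callbacks for the mapping. Note the version guard below: kernels from 2.6.26
 * use the .fault hook (taking a struct vm_fault), older kernels the since-removed
 * .nopfn hook. */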
static struct vm_operations_struct ump_vm_ops = {
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault for VMA region.
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user *address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}

static void ump_vma_open(struct vm_area_struct *vma)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

static void ump_vma_close(struct vm_area_struct *vma)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val) {
		ump_memory_allocation *descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;

		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem(&args);
	}

	/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
}

_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor)
{
	ump_vma_usage_tracker *vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker) {
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _ump_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct *)descriptor->process_mapping_info;
	if (NULL == vma) {
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
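
	/* Mark the VMA as an IO/PFN style mapping so the kernel leaves the pages alone:
	 * no expansion, no core dumps, no struct page assumptions. VM_RESERVED was
	 * replaced by VM_DONTDUMP/VM_DONTEXPAND in Linux 3.7, hence the version guard. */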
	vma->vm_flags |= VM_IO;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_flags |= VM_PFNMAP;
#endif

	if (0 == descriptor->is_cached) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));

	/* Setup the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
	descriptor->mapping = (void __user *)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /* this can later be increased if the process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}

void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor)
{
	struct vm_area_struct *vma;
	ump_vma_usage_tracker *vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping.
	 * All that remains is that we remove the vma_usage_tracker setup in init() */
	vma = (struct vm_area_struct *)descriptor->process_mapping_info;

	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
}

_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size)
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct *)descriptor->process_mapping_info;

	if (NULL == vma) return _MALI_OSK_ERR_FAULT;
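
	/* Insert the physical pages into the user VMA; remap_pfn_range() returns
	 * non-zero on failure, which is folded into a _mali_osk_errcode_t here. */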
	retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

	DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr: 0x%08lx, physical addr: 0x%08lx, size: %lu, prot: 0x%x, vm_flags: 0x%x RETVAL: 0x%x\n",
	            ump_dd_secure_id_get(descriptor->handle),
	            (unsigned long)vma,
	            (unsigned long)(vma->vm_start + offset),
	            (unsigned long)*phys_addr,
	            size,
	            (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

	return retval;
}
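
/* Fallback when no usable virtual address is available for a ranged flush: flush the
 * entire L1 cache through the ARM kernel helper instead. */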
static void level1_cache_flush_all(void)
{
	DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
	__cpuc_flush_kern_all();
}
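
/* Synchronise CPU caches for (part of) a UMP allocation: the L1 cache is flushed by
 * virtual address when a valid user mapping exists, then the outer (L2) cache is
 * maintained by physical address, block by block. 'op' selects clean, invalidate,
 * or clean+invalidate. */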
void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data)
{
	int i;

	/* Flush L1 using virtual address, the entire range in one go.
	 * Only flush if the user space process has a valid write mapping on the given address. */
	if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
		__cpuc_flush_dcache_area(virt, size);
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size));
	} else {
		if (session_data) {
			if (op == _UMP_UK_MSYNC_FLUSH_L1) {
				DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
				session_data->has_pending_level1_cache_flush = 0;
				level1_cache_flush_all();
				return;
			} else {
				if (session_data->cache_operations_ongoing) {
					session_data->has_pending_level1_cache_flush++;
					DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending: %d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
				} else {
					/* Flush the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
					level1_cache_flush_all();
				}
			}
		} else {
			DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
			level1_cache_flush_all();
		}
	}

	if (NULL == mem) return;

	if (mem->size_bytes == size) {
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
	} else {
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
		            mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
	}

	/* Flush L2 using physical addresses, block for block. */
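	/* 'offset' is consumed while skipping or trimming leading blocks, and 'size'
	 * counts down as each (partial) block is flushed; the walk stops at size == 0. */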
	for (i = 0; i < mem->nr_blocks; i++) {
		u32 start_p, end_p;
		ump_dd_physical_block *block;
		block = &mem->block_array[i];

		if (offset >= block->size) {
			offset -= block->size;
			continue;
		}

		if (offset) {
			start_p = (u32)block->addr + offset;
			/* We'll zero the offset later, after using it to calculate end_p. */
		} else {
			start_p = (u32)block->addr;
		}

		if (size < block->size - offset) {
			end_p = start_p + size;
			size = 0;
		} else {
			if (offset) {
				end_p = start_p + (block->size - offset);
				size -= block->size - offset;
				offset = 0;
			} else {
				end_p = start_p + block->size;
				size -= block->size;
			}
		}

		switch (op) {
		case _UMP_UK_MSYNC_CLEAN:
			outer_clean_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
			outer_flush_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_INVALIDATE:
			outer_inv_range(start_p, end_p);
			break;
		default:
			break;
		}

		if (0 == size) {
			/* Nothing left to flush. */
			break;
		}
	}
}