/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14 #include <linux/device.h>
15 #include <linux/dma-buf.h>
16 #include <linux/fdtable.h>
17 #include <linux/idr.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <generated/uapi/linux/version.h>
22 #include "tee_private.h"
24 /* extra references appended to shm object for registered shared memory */
25 struct tee_shm_dmabuf_ref
{
27 struct dma_buf
*dmabuf
;
28 struct dma_buf_attachment
*attach
;
32 static void tee_shm_release(struct tee_shm
*shm
)
34 struct tee_device
*teedev
= shm
->teedev
;
36 mutex_lock(&teedev
->mutex
);
37 idr_remove(&teedev
->idr
, shm
->id
);
40 mutex_unlock(&teedev
->mutex
);
42 if (shm
->flags
& TEE_SHM_EXT_DMA_BUF
) {
43 struct tee_shm_dmabuf_ref
*ref
;
45 ref
= container_of(shm
, struct tee_shm_dmabuf_ref
, shm
);
46 dma_buf_unmap_attachment(ref
->attach
, ref
->sgt
,
48 dma_buf_detach(shm
->dmabuf
, ref
->attach
);
49 dma_buf_put(ref
->dmabuf
);
51 struct tee_shm_pool_mgr
*poolm
;
53 if (shm
->flags
& TEE_SHM_DMA_BUF
)
54 poolm
= &teedev
->pool
->dma_buf_mgr
;
56 poolm
= &teedev
->pool
->private_mgr
;
58 poolm
->ops
->free(poolm
, shm
);
62 tee_device_put(teedev
);
65 static struct sg_table
*tee_shm_op_map_dma_buf(struct dma_buf_attachment
66 *attach
, enum dma_data_direction dir
)
71 static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment
*attach
,
72 struct sg_table
*table
,
73 enum dma_data_direction dir
)
77 static void tee_shm_op_release(struct dma_buf
*dmabuf
)
79 struct tee_shm
*shm
= dmabuf
->priv
;
84 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 12)
85 static void *tee_shm_op_map(struct dma_buf
*dmabuf
, unsigned long pgnum
)
90 static void *tee_shm_op_kmap_atomic(struct dma_buf
*dmabuf
, unsigned long pgnum
)
95 static void *tee_shm_op_kmap(struct dma_buf
*dmabuf
, unsigned long pgnum
)
101 static int tee_shm_op_mmap(struct dma_buf
*dmabuf
, struct vm_area_struct
*vma
)
103 struct tee_shm
*shm
= dmabuf
->priv
;
104 size_t size
= vma
->vm_end
- vma
->vm_start
;
106 return remap_pfn_range(vma
, vma
->vm_start
, shm
->paddr
>> PAGE_SHIFT
,
107 size
, vma
->vm_page_prot
);
110 static struct dma_buf_ops tee_shm_dma_buf_ops
= {
111 .map_dma_buf
= tee_shm_op_map_dma_buf
,
112 .unmap_dma_buf
= tee_shm_op_unmap_dma_buf
,
113 .release
= tee_shm_op_release
,
114 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 12)
115 .map
= tee_shm_op_map
,
117 .kmap_atomic
= tee_shm_op_kmap_atomic
,
118 .kmap
= tee_shm_op_kmap
,
120 .mmap
= tee_shm_op_mmap
,
124 * tee_shm_alloc() - Allocate shared memory
125 * @ctx: Context that allocates the shared memory
126 * @size: Requested size of shared memory
127 * @flags: Flags setting properties for the requested shared memory.
129 * Memory allocated as global shared memory is automatically freed when the
130 * TEE file pointer is closed. The @flags field uses the bits defined by
131 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
132 * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
133 * associated with a dma-buf handle, else driver private memory.
135 struct tee_shm
*tee_shm_alloc(struct tee_context
*ctx
, size_t size
, u32 flags
)
137 struct tee_device
*teedev
= ctx
->teedev
;
138 struct tee_shm_pool_mgr
*poolm
= NULL
;
143 if (!(flags
& TEE_SHM_MAPPED
)) {
144 dev_err(teedev
->dev
.parent
,
145 "only mapped allocations supported\n");
146 return ERR_PTR(-EINVAL
);
149 if ((flags
& ~(TEE_SHM_MAPPED
| TEE_SHM_DMA_BUF
))) {
150 dev_err(teedev
->dev
.parent
, "invalid shm flags 0x%x", flags
);
151 return ERR_PTR(-EINVAL
);
154 if (!tee_device_get(teedev
))
155 return ERR_PTR(-EINVAL
);
158 /* teedev has been detached from driver */
159 ret
= ERR_PTR(-EINVAL
);
163 shm
= kzalloc(sizeof(*shm
), GFP_KERNEL
);
165 ret
= ERR_PTR(-ENOMEM
);
170 shm
->teedev
= teedev
;
172 if (flags
& TEE_SHM_DMA_BUF
)
173 poolm
= &teedev
->pool
->dma_buf_mgr
;
175 poolm
= &teedev
->pool
->private_mgr
;
177 rc
= poolm
->ops
->alloc(poolm
, shm
, size
);
183 mutex_lock(&teedev
->mutex
);
184 shm
->id
= idr_alloc(&teedev
->idr
, shm
, 1, 0, GFP_KERNEL
);
185 mutex_unlock(&teedev
->mutex
);
187 ret
= ERR_PTR(shm
->id
);
191 if (flags
& TEE_SHM_DMA_BUF
) {
192 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,16))
193 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
195 exp_info
.ops
= &tee_shm_dma_buf_ops
;
196 exp_info
.size
= shm
->size
;
197 exp_info
.flags
= O_RDWR
;
200 shm
->dmabuf
= dma_buf_export(&exp_info
);
202 shm
->dmabuf
= dma_buf_export(shm
, &tee_shm_dma_buf_ops
,
205 if (IS_ERR(shm
->dmabuf
)) {
206 ret
= ERR_CAST(shm
->dmabuf
);
210 mutex_lock(&teedev
->mutex
);
211 list_add_tail(&shm
->link
, &ctx
->list_shm
);
212 mutex_unlock(&teedev
->mutex
);
216 mutex_lock(&teedev
->mutex
);
217 idr_remove(&teedev
->idr
, shm
->id
);
218 mutex_unlock(&teedev
->mutex
);
220 poolm
->ops
->free(poolm
, shm
);
224 tee_device_put(teedev
);
227 EXPORT_SYMBOL_GPL(tee_shm_alloc
);
229 struct tee_shm
*tee_shm_register_fd(struct tee_context
*ctx
, int fd
)
231 struct tee_shm_dmabuf_ref
*ref
;
233 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,16))
234 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
237 if (!tee_device_get(ctx
->teedev
))
238 return ERR_PTR(-EINVAL
);
240 ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
242 rc
= ERR_PTR(-ENOMEM
);
247 ref
->shm
.teedev
= ctx
->teedev
;
250 ref
->dmabuf
= dma_buf_get(fd
);
252 rc
= ERR_PTR(-EINVAL
);
256 ref
->attach
= dma_buf_attach(ref
->dmabuf
, &ref
->shm
.teedev
->dev
);
257 if (IS_ERR_OR_NULL(ref
->attach
)) {
258 rc
= ERR_PTR(-EINVAL
);
262 ref
->sgt
= dma_buf_map_attachment(ref
->attach
, DMA_BIDIRECTIONAL
);
263 if (IS_ERR_OR_NULL(ref
->sgt
)) {
264 rc
= ERR_PTR(-EINVAL
);
268 if (sg_nents(ref
->sgt
->sgl
) != 1) {
269 rc
= ERR_PTR(-EINVAL
);
273 ref
->shm
.paddr
= sg_dma_address(ref
->sgt
->sgl
);
274 ref
->shm
.size
= sg_dma_len(ref
->sgt
->sgl
);
275 ref
->shm
.flags
= TEE_SHM_DMA_BUF
| TEE_SHM_EXT_DMA_BUF
;
277 mutex_lock(&ref
->shm
.teedev
->mutex
);
278 ref
->shm
.id
= idr_alloc(&ref
->shm
.teedev
->idr
, &ref
->shm
,
280 mutex_unlock(&ref
->shm
.teedev
->mutex
);
281 if (ref
->shm
.id
< 0) {
282 rc
= ERR_PTR(ref
->shm
.id
);
285 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,16))
286 /* export a dmabuf to later get a userland ref */
287 exp_info
.ops
= &tee_shm_dma_buf_ops
;
288 exp_info
.size
= ref
->shm
.size
;
289 exp_info
.flags
= O_RDWR
;
290 exp_info
.priv
= &ref
->shm
;
292 ref
->shm
.dmabuf
= dma_buf_export(&exp_info
);
294 ref
->shm
.dmabuf
= dma_buf_export(&ref
->shm
, &tee_shm_dma_buf_ops
,
295 ref
->shm
.size
, O_RDWR
);
297 if (IS_ERR(ref
->shm
.dmabuf
)) {
298 rc
= ERR_PTR(-EINVAL
);
302 mutex_lock(&ref
->shm
.teedev
->mutex
);
303 list_add_tail(&ref
->shm
.link
, &ctx
->list_shm
);
304 mutex_unlock(&ref
->shm
.teedev
->mutex
);
310 if (ref
->shm
.id
>= 0) {
311 mutex_lock(&ctx
->teedev
->mutex
);
312 idr_remove(&ctx
->teedev
->idr
, ref
->shm
.id
);
313 mutex_unlock(&ctx
->teedev
->mutex
);
316 dma_buf_unmap_attachment(ref
->attach
, ref
->sgt
,
319 dma_buf_detach(ref
->dmabuf
, ref
->attach
);
321 dma_buf_put(ref
->dmabuf
);
324 tee_device_put(ctx
->teedev
);
327 EXPORT_SYMBOL_GPL(tee_shm_register_fd
);
330 * tee_shm_get_fd() - Increase reference count and return file descriptor
331 * @shm: Shared memory handle
332 * @returns user space file descriptor to shared memory
334 int tee_shm_get_fd(struct tee_shm
*shm
)
338 if (!(shm
->flags
& TEE_SHM_DMA_BUF
))
341 fd
= dma_buf_fd(shm
->dmabuf
, O_CLOEXEC
);
343 get_dma_buf(shm
->dmabuf
);
348 * tee_shm_free() - Free shared memory
349 * @shm: Handle to shared memory to free
351 void tee_shm_free(struct tee_shm
*shm
)
354 * dma_buf_put() decreases the dmabuf reference counter and will
355 * call tee_shm_release() when the last reference is gone.
357 * In the case of driver private memory we call tee_shm_release
358 * directly instead as it doesn't have a reference counter.
360 if (shm
->flags
& TEE_SHM_DMA_BUF
)
361 dma_buf_put(shm
->dmabuf
);
363 tee_shm_release(shm
);
365 EXPORT_SYMBOL_GPL(tee_shm_free
);
368 * tee_shm_va2pa() - Get physical address of a virtual address
369 * @shm: Shared memory handle
370 * @va: Virtual address to tranlsate
371 * @pa: Returned physical address
372 * @returns 0 on success and < 0 on failure
374 int tee_shm_va2pa(struct tee_shm
*shm
, void *va
, phys_addr_t
*pa
)
376 if (!(shm
->flags
& TEE_SHM_MAPPED
))
378 /* Check that we're in the range of the shm */
379 if ((char *)va
< (char *)shm
->kaddr
)
381 if ((char *)va
>= ((char *)shm
->kaddr
+ shm
->size
))
384 return tee_shm_get_pa(
385 shm
, (unsigned long)va
- (unsigned long)shm
->kaddr
, pa
);
387 EXPORT_SYMBOL_GPL(tee_shm_va2pa
);
390 * tee_shm_pa2va() - Get virtual address of a physical address
391 * @shm: Shared memory handle
392 * @pa: Physical address to tranlsate
393 * @va: Returned virtual address
394 * @returns 0 on success and < 0 on failure
396 int tee_shm_pa2va(struct tee_shm
*shm
, phys_addr_t pa
, void **va
)
398 if (!(shm
->flags
& TEE_SHM_MAPPED
))
400 /* Check that we're in the range of the shm */
403 if (pa
>= (shm
->paddr
+ shm
->size
))
407 void *v
= tee_shm_get_va(shm
, pa
- shm
->paddr
);
415 EXPORT_SYMBOL_GPL(tee_shm_pa2va
);
418 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
419 * @shm: Shared memory handle
420 * @offs: Offset from start of this shared memory
421 * @returns virtual address of the shared memory + offs if offs is within
422 * the bounds of this shared memory, else an ERR_PTR
424 void *tee_shm_get_va(struct tee_shm
*shm
, size_t offs
)
426 if (!(shm
->flags
& TEE_SHM_MAPPED
))
427 return ERR_PTR(-EINVAL
);
428 if (offs
>= shm
->size
)
429 return ERR_PTR(-EINVAL
);
430 return (char *)shm
->kaddr
+ offs
;
432 EXPORT_SYMBOL_GPL(tee_shm_get_va
);
435 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
436 * @shm: Shared memory handle
437 * @offs: Offset from start of this shared memory
438 * @pa: Physical address to return
439 * @returns 0 if offs is within the bounds of this shared memory, else an
442 int tee_shm_get_pa(struct tee_shm
*shm
, size_t offs
, phys_addr_t
*pa
)
444 if (offs
>= shm
->size
)
447 *pa
= shm
->paddr
+ offs
;
450 EXPORT_SYMBOL_GPL(tee_shm_get_pa
);
453 * tee_shm_get_from_id() - Find shared memory object and increase reference
455 * @ctx: Context owning the shared memory
456 * @id: Id of shared memory object
457 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
459 struct tee_shm
*tee_shm_get_from_id(struct tee_context
*ctx
, int id
)
461 struct tee_device
*teedev
;
465 return ERR_PTR(-EINVAL
);
467 teedev
= ctx
->teedev
;
468 mutex_lock(&teedev
->mutex
);
469 shm
= idr_find(&teedev
->idr
, id
);
470 if (!shm
|| shm
->ctx
!= ctx
)
471 shm
= ERR_PTR(-EINVAL
);
472 else if (shm
->flags
& TEE_SHM_DMA_BUF
)
473 get_dma_buf(shm
->dmabuf
);
474 mutex_unlock(&teedev
->mutex
);
477 EXPORT_SYMBOL_GPL(tee_shm_get_from_id
);
480 * tee_shm_get_id() - Get id of a shared memory object
481 * @shm: Shared memory handle
484 int tee_shm_get_id(struct tee_shm
*shm
)
488 EXPORT_SYMBOL_GPL(tee_shm_get_id
);
491 * tee_shm_put() - Decrease reference count on a shared memory handle
492 * @shm: Shared memory handle
494 void tee_shm_put(struct tee_shm
*shm
)
496 if (shm
->flags
& TEE_SHM_DMA_BUF
)
497 dma_buf_put(shm
->dmabuf
);
499 EXPORT_SYMBOL_GPL(tee_shm_put
);