/*
 * drivers/gpu/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>

#include "ion.h"
/* for ion_heap_ops structure */
#include "ion_priv.h"

#define ION_CMA_ALLOCATE_FAILED -1

struct ion_cma_heap {
	struct ion_heap heap;
	struct device *dev;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
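
/*
 * Per-buffer bookkeeping: the kernel virtual address and DMA handle
 * returned by dma_alloc_coherent(), plus the single-entry sg_table
 * handed back to the ION core through the map_dma callback.
 */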
struct ion_cma_buffer_info {
	void *cpu_addr;
	dma_addr_t handle;
	struct sg_table *table;
};
/*
 * Create scatter-list for the already allocated DMA buffer.
 * A single entry is enough because dma_alloc_coherent() returned one
 * physically contiguous region. This function could be replaced by
 * dma_common_get_sgtable as soon as it is available.
 */
static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	if (buffer->flags & ION_FLAG_CACHED)
		return -EINVAL;

	if (align > PAGE_SIZE)
		return -EINVAL;

	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
					    GFP_HIGHUSER | __GFP_ZERO);
	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
				info->handle, len))
		goto free_table;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return 0;

free_table:
	kfree(info->table);
free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}
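
/*
 * Undo everything ion_cma_allocate() set up: the CMA-backed coherent
 * memory, the sg_table, and the bookkeeping hanging off buffer->priv_virt.
 */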
static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release memory */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	/* release sg table */
	sg_free_table(info->table);
	kfree(info->table);
	kfree(info);
}
/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
			ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
		&info->handle);

	*addr = info->handle;
	*len = buffer->size;

	return 0;
}
static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	return info->table;
}
static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	/* nothing to do: the sg_table lives until ion_cma_free() */
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
				 buffer->size);
}
static void *ion_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	/* kernel memory mapping has been done at allocation time */
	return info->cpu_addr;
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	/* nothing to unmap: the coherent mapping persists until free */
}
static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_dma = ion_cma_heap_map_dma,
	.unmap_dma = ion_cma_heap_unmap_dma,
	.phys = ion_cma_phys,
	.map_user = ion_cma_mmap,
	.map_kernel = ion_cma_map_kernel,
	.unmap_kernel = ion_cma_unmap_kernel,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
	struct ion_cma_heap *cma_heap;

	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return ERR_PTR(-ENOMEM);

	cma_heap->heap.ops = &ion_cma_ops;
	/*
	 * get device from private heaps data, later it will be
	 * used to make the link with reserved CMA memory
	 */
	cma_heap->dev = data->priv;
	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
	return &cma_heap->heap;
}
void ion_cma_heap_destroy(struct ion_heap *heap)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);

	kfree(cma_heap);
}