(unsigned long)selected_size / SZ_1M);
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
- KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
+ KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
+ &kvm_cma);
}
}
{
int ret;
- ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+ "reserved", res_cma);
if (ret)
return ret;
return -EINVAL;
}
- err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+ err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
if (err) {
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask);
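+
+/*
+ * Usage sketch (illustrative only, not part of this patch): reserve a
+ * named area and read the name back. The "camera" name and SZ_64M size
+ * are made-up values for the example.
+ *
+ *	struct cma *cma;
+ *
+ *	cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false, "camera", &cma);
+ *	pr_info("reserved CMA area %s\n", cma_get_name(cma));
+ */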
return cma->count << PAGE_SHIFT;
}
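+/*
+ * Return the human-readable name of a CMA area, for use by debugfs and
+ * diagnostics; falls back to "(undefined)" if no name was ever set.
+ */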
+const char *cma_get_name(const struct cma *cma)
+{
+ return cma->name ? cma->name : "(undefined)";
+}
+
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
int align_order)
{
*/
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma)
{
struct cma *cma;
* subsystems (like slab allocator) are available.
*/
cma = &cma_areas[cma_area_count];
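+	/*
+	 * Keep the caller-supplied name if one was given; otherwise
+	 * synthesize a "cmaN" fallback so every area can still be
+	 * identified in debugfs.
+	 */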
+ if (name) {
+ cma->name = name;
+ } else {
+		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
+ if (!cma->name)
+ return -ENOMEM;
+ }
cma->base_pfn = PFN_DOWN(base);
cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma)
+ bool fixed, const char *name, struct cma **res_cma)
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
base = addr;
}
- ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+ ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
if (ret)
goto err;
struct hlist_head mem_head;
spinlock_t mem_head_lock;
#endif
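+	/* human-readable name, also used for the debugfs directory */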
+ const char *name;
};
extern struct cma cma_areas[MAX_CMA_AREAS];
char name[16];
int u32s;
- sprintf(name, "cma-%d", idx);
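+	/* The debugfs directory now carries the area's name, e.g. "cma-reserved". */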
+	scnprintf(name, sizeof(name), "cma-%s", cma->name);
tmp = debugfs_create_dir(name, cma_debugfs_root);