From 6b9d7c8e8ecf35dc9ba6763a45d81e54ee3ffcde Mon Sep 17 00:00:00 2001
From: "Dennis Zhou (Facebook)"
Date: Mon, 24 Jul 2017 19:02:03 -0400
Subject: [PATCH] percpu: end chunk area maps page aligned for the populated bitmap

The area map allocator manages the first chunk area by hiding all but
the region it is responsible for serving in the area map. To align this
with the populated page bitmap, end_offset is introduced to keep track
of the delta required to make the end page aligned. When necessary, the
area map is appended with the page aligned end, in line with the bitmap
allocator's requirement that the ending be aligned with the LCM of
PAGE_SIZE and the size of each bitmap block. percpu_stats is updated to
ignore this region when present.

Signed-off-by: Dennis Zhou
Reviewed-by: Josef Bacik
Signed-off-by: Tejun Heo
---
 mm/percpu-internal.h | 3 +++
 mm/percpu-stats.c    | 5 +++--
 mm/percpu.c          | 9 +++++++++
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index c876b5b5bc18..f02f31cea0e6 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -26,6 +26,9 @@ struct pcpu_chunk {
 	int			start_offset;	/* the overlap with the previous
 						   region to have a page aligned
 						   base_addr */
+	int			end_offset;	/* additional area required to
+						   have the region end page
+						   aligned */
 	int			nr_populated;	/* # of populated pages */
 	unsigned long		populated[];	/* populated bitmap */
 };
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index 32f3550ea099..ffbdb96cdbeb 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -51,7 +51,7 @@ static int find_max_map_used(void)
 static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
 			    int *buffer)
 {
-	int i, s_index, last_alloc, alloc_sign, as_len;
+	int i, s_index, e_index, last_alloc, alloc_sign, as_len;
 	int *alloc_sizes, *p;
 	/* statistics */
 	int sum_frag = 0, max_frag = 0;
@@ -59,10 +59,11 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
 
 	alloc_sizes = buffer;
 	s_index = (chunk->start_offset) ? 1 : 0;
+	e_index = chunk->map_used - ((chunk->end_offset) ? 1 : 0);
 
 	/* find last allocation */
 	last_alloc = -1;
-	for (i = chunk->map_used - 1; i >= s_index; i--) {
+	for (i = e_index - 1; i >= s_index; i--) {
 		if (chunk->map[i] & 1) {
 			last_alloc = i;
 			break;
diff --git a/mm/percpu.c b/mm/percpu.c
index 2e785a77ce14..1d2c980fde3f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -715,12 +715,16 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(void *base_addr,
 					 int init_map_size)
 {
 	struct pcpu_chunk *chunk;
+	int region_size;
+
+	region_size = PFN_ALIGN(start_offset + map_size);
 
 	chunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&chunk->list);
 	INIT_LIST_HEAD(&chunk->map_extend_list);
 	chunk->base_addr = base_addr;
 	chunk->start_offset = start_offset;
+	chunk->end_offset = region_size - chunk->start_offset - map_size;
 	chunk->map = map;
 	chunk->map_alloc = init_map_size;
 
@@ -735,6 +739,11 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(void *base_addr,
 	chunk->map[2] = (chunk->start_offset + chunk->free_size) | 1;
 	chunk->map_used = 2;
 
+	if (chunk->end_offset) {
+		/* hide the end of the bitmap */
+		chunk->map[++chunk->map_used] = region_size | 1;
+	}
+
 	return chunk;
 }
 
-- 
2.20.1
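
The relationship the percpu.c hunks set up can be checked with a small
standalone sketch. This is not kernel code: PAGE_SIZE, the re-derived
PFN_ALIGN macro, and the sample start_offset/map_size values below are
illustrative assumptions, chosen only to show how region_size and
end_offset are computed.

/*
 * Standalone userspace sketch (not part of the patch): how end_offset
 * falls out of page-aligning the end of the first chunk's served region.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
/* round up to the next page boundary, mirroring the kernel's PFN_ALIGN */
#define PFN_ALIGN(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long start_offset = 1024;	/* hypothetical unaligned start */
	unsigned long map_size = 10240;		/* hypothetical served region size */

	/* the region the chunk tracks must end on a page boundary */
	unsigned long region_size = PFN_ALIGN(start_offset + map_size);
	/* delta hidden at the end of the area map, as in the patch */
	unsigned long end_offset = region_size - start_offset - map_size;

	printf("region_size=%lu end_offset=%lu\n", region_size, end_offset);
	/* prints: region_size=12288 end_offset=1024 */
	return 0;
}

With these sample numbers the served region ends 1024 bytes short of a
page boundary, so the patch would append one extra, permanently
allocated map entry (region_size | 1) to hide that tail, mirroring how
start_offset is hidden at the front of the area map.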