#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
+#include <linux/aee.h>
#include <trace/events/kmem.h>
+#include <mach/mtk_memcfg.h>
#include "internal.h"
-
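+/*
+ * Vendor debug aid (ARM64): redefine BUG() as a forced NULL-pointer
+ * write so the SLUB consistency checks below crash on the spot with a
+ * register dump, rather than relying on the generic BUG() handler.
+ */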
+#ifdef CONFIG_ARM64
+#ifdef BUG
+#undef BUG
+#define BUG() \
+ do { \
+ pr_alert("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ *(char *)0 = 0; \
+ } while (0)
+#endif
+#endif
/*
* Lock order:
* 1. slab_mutex (Global Mutex)
unsigned long when; /* When did the operation occur */
};
-enum track_item { TRACK_ALLOC, TRACK_FREE };
+enum track_item { TRACK_FREE, TRACK_ALLOC };
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
__bit_spin_unlock(PG_locked, &page->flags);
}
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+ struct page tmp;
+ tmp.counters = counters_new;
+ /*
+ * page->counters can cover frozen/inuse/objects as well
+ * as page->_count. If we assign to ->counters directly
+ * we run the risk of losing updates to page->_count, so
+ * be careful and only assign to the fields we need.
+ */
+ page->frozen = tmp.frozen;
+ page->inuse = tmp.inuse;
+ page->objects = tmp.objects;
+}
+
/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
slab_lock(page);
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
return 1;
}
slab_lock(page);
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
local_irq_restore(flags);
return 1;
{
slab_bug(s, "%s", reason);
print_trailer(s, page, object);
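+ /* this tree panics immediately on any detected slab corruption */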
+ BUG();
}
static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
slab_bug(s, "%s", buf);
print_page_info(page);
dump_stack();
+ BUG();
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
fault, end - 1, fault[0], value);
print_trailer(s, page, object);
+ /* crash here, before restore_bytes() silently repairs the poison */
+ BUG();
restore_bytes(s, what, value, fault, end);
+
return 0;
}
maxobj = order_objects(compound_order(page), s->size, s->reserved);
if (page->objects > maxobj) {
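+ /* dump the raw struct page and kmem_cache to aid debugging the mismatch */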
+ pr_alert("page->objects: %d, maxobj: %d, comporder: %d", page->objects,
+ maxobj, compound_order(page));
+ pr_alert("s->size %d, s->reserved: %d", s->size, s->reserved);
+ print_section("page: ", (void *)page, sizeof(struct page));
+ print_section("kmem_cache: ", (void *)s, sizeof(struct kmem_cache));
slab_err(s, page, "objects %u > max %u",
- s->name, page->objects, maxobj);
+ page->objects, maxobj);
return 0;
}
if (page->inuse > page->objects) {
slab_err(s, page, "inuse %u > max %u",
- s->name, page->inuse, page->objects);
+ page->inuse, page->objects);
return 0;
}
/* Slab_pad_check fixes things up after itself */
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+ if (flags & SLAB_NO_DEBUG)
+ return flags;
+
+ if (slub_debug && (!slub_debug_slabs || (name &&
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
flags |= slub_debug;
return flags;
flags |= __GFP_NOTRACK;
if (node == NUMA_NO_NODE)
+#ifndef CONFIG_MTK_PAGERECORDER
return alloc_pages(flags, order);
+#else
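+ /* bypass MTK page-debug recording for slab page allocations */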
+ return alloc_pages_nopagedebug(flags, order);
+#endif
else
return alloc_pages_exact_node(node, flags, order);
}
* and freed.
*/
+#ifdef CONFIG_MTK_MEMCFG
+#define MTK_MEMCFG_SLABTRACE_CNT 4
+/* MTK_MEMCFG_SLABTRACE_CNT must always be <= TRACK_ADDRS_COUNT */
+#if (MTK_MEMCFG_SLABTRACE_CNT > TRACK_ADDRS_COUNT)
+#error MTK_MEMCFG_SLABTRACE_CNT must not exceed TRACK_ADDRS_COUNT
+#endif
+#endif
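+/*
+ * mtk_memcfg slab trace: each location entry also stores up to
+ * MTK_MEMCFG_SLABTRACE_CNT return addresses from the recorded stack,
+ * so call sites are deduplicated by (truncated) call stack rather
+ * than by the single caller address alone.
+ */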
+
struct location {
unsigned long count;
unsigned long addr;
+#ifdef CONFIG_MTK_MEMCFG
+#ifdef CONFIG_STACKTRACE
+ unsigned long addrs[MTK_MEMCFG_SLABTRACE_CNT]; /* Called-from addresses */
+#endif
+#endif
long long sum_time;
long min_time;
long max_time;
static void free_loc_track(struct loc_track *t)
{
if (t->max)
+#ifndef CONFIG_MTK_PAGERECORDER
free_pages((unsigned long)t->loc,
get_order(sizeof(struct location) * t->max));
+#else
+ /* t->loc holds a virtual address from __get_free_pages_nopagedebug() */
+ __free_pages_nopagedebug(virt_to_page(t->loc),
+ get_order(sizeof(struct location) * t->max));
+#endif
}
static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
order = get_order(sizeof(struct location) * max);
+#ifndef CONFIG_MTK_PAGERECORDER
l = (void *)__get_free_pages(flags, order);
+#else
+ l = (void *)__get_free_pages_nopagedebug(flags, order);
+#endif
if (!l)
return 0;
page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
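+ /* per-cpu partial lists track only page counts, so object totals cannot be derived here */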
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}
{
return -EIO;
}
+
+#ifdef CONFIG_MTK_MEMCFG
+
+static int mtk_memcfg_add_location(struct loc_track *t, struct kmem_cache *s,
+ const struct track *track)
+{
+ long start, end, pos;
+ struct location *l;
+ unsigned long (*caddrs)[MTK_MEMCFG_SLABTRACE_CNT]; /* Called from addresses */
+ unsigned long taddrs[MTK_MEMCFG_SLABTRACE_CNT] = { 0 }; /* called-from addresses of track */
+ unsigned long age = jiffies - track->when;
+ int i, cnt;
+
+ start = -1;
+ end = t->count;
+
+ /* find where track->addr (or the call site at track->addr - 4) sits in addrs[] */
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++) {
+ if ((track->addr == track->addrs[i]) ||
+ (track->addr - 4 == track->addrs[i]))
+ break;
+ }
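+ /* if no match was found, i == TRACK_ADDRS_COUNT, cnt is 0 and taddrs stays zeroed */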
+ cnt = min(MTK_MEMCFG_SLABTRACE_CNT, TRACK_ADDRS_COUNT - i);
+ memcpy(taddrs, track->addrs + i, cnt * sizeof(unsigned long));
+
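+ /* binary search the sorted location table for the same truncated stack */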
+ for ( ; ; ) {
+ pos = start + (end - start + 1) / 2;
+
+ /*
+ * There is nothing at "end". If we end up there
+ * we need to add something to before end.
+ */
+ if (pos == end)
+ break;
+
+ caddrs = &(t->loc[pos].addrs);
+ if (!memcmp(caddrs, taddrs, MTK_MEMCFG_SLABTRACE_CNT * sizeof(unsigned long))) {
+
+ l = &t->loc[pos];
+ l->count++;
+ if (track->when) {
+ l->sum_time += age;
+ if (age < l->min_time)
+ l->min_time = age;
+ if (age > l->max_time)
+ l->max_time = age;
+
+ if (track->pid < l->min_pid)
+ l->min_pid = track->pid;
+ if (track->pid > l->max_pid)
+ l->max_pid = track->pid;
+
+ cpumask_set_cpu(track->cpu,
+ to_cpumask(l->cpus));
+ }
+ node_set(page_to_nid(virt_to_page(track)), l->nodes);
+ return 1;
+ }
+
+ if (memcmp(caddrs, taddrs, MTK_MEMCFG_SLABTRACE_CNT * sizeof(unsigned long)) < 0)
+ end = pos;
+ else
+ start = pos;
+ }
+
+ /*
+ * Not found. Insert new tracking element.
+ */
+ if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
+ return 0;
+
+ l = t->loc + pos;
+ if (pos < t->count)
+ memmove(l + 1, l,
+ (t->count - pos) * sizeof(struct location));
+ t->count++;
+ l->count = 1;
+ l->addr = track->addr;
+ memcpy(l->addrs, taddrs, MTK_MEMCFG_SLABTRACE_CNT * sizeof(unsigned long));
+ l->sum_time = age;
+ l->min_time = age;
+ l->max_time = age;
+ l->min_pid = track->pid;
+ l->max_pid = track->pid;
+ cpumask_clear(to_cpumask(l->cpus));
+ cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
+ nodes_clear(l->nodes);
+ node_set(page_to_nid(virt_to_page(track)), l->nodes);
+ return 1;
+}
+
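+/* record the saved track of every allocated (live) object on one slab page */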
+static void mtk_memcfg_process_slab(struct loc_track *t, struct kmem_cache *s,
+ struct page *page, enum track_item alloc,
+ unsigned long *map)
+{
+ void *addr = page_address(page);
+ void *p;
+
+ bitmap_zero(map, page->objects);
+ get_map(s, page, map);
+
+ for_each_object(p, s, addr, page->objects)
+ if (!test_bit(slab_index(p, s, addr), map))
+ mtk_memcfg_add_location(t, s, get_track(s, p, alloc));
+}
+
+static int mtk_memcfg_list_locations(struct kmem_cache *s, struct seq_file *m,
+ enum track_item alloc)
+{
+ unsigned long i, j;
+ struct loc_track t = { 0, 0, NULL };
+ int node;
+ unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+ GFP_TEMPORARY)) {
+ kfree(map);
+ return seq_printf(m, "Out of memory\n");
+ }
+ /* Push back cpu slabs */
+ flush_all(s);
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n = get_node(s, node);
+ unsigned long flags;
+ struct page *page;
+
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ mtk_memcfg_process_slab(&t, s, page, alloc, map);
+ list_for_each_entry(page, &n->full, lru)
+ mtk_memcfg_process_slab(&t, s, page, alloc, map);
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
+ struct location *l = &t.loc[i];
+
+ seq_printf(m, "%7ld ", l->count);
+
+ if (l->addr)
+ seq_printf(m, "%pS", (void *)l->addr);
+ else
+ seq_printf(m, "<not-available>");
+
+ for (j = 0; j < MTK_MEMCFG_SLABTRACE_CNT; j++)
+ if (l->addrs[j])
+ seq_printf(m, " %p", (void *)l->addrs[j]);
+
+ seq_printf(m, "\n");
+ }
+
+ if (!t.count)
+ seq_printf(m, "No data\n");
+
+ free_loc_track(&t);
+ kfree(map);
+ return 0;
+}
+
+static int mtk_memcfg_slabtrace_show(struct seq_file *m, void *p)
+{
+ struct kmem_cache *s;
+
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(s, &slab_caches, list) {
+ seq_printf(m, "========== kmem_cache: %s alloc_calls ==========\n", s->name);
+ if (!(s->flags & SLAB_STORE_USER))
+ continue;
+ mtk_memcfg_list_locations(s, m, TRACK_ALLOC);
+ }
+ mutex_unlock(&slab_mutex);
+ return 0;
+}
+
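+/* seq_file open hook; presumably wired to a proc entry by mtk_memcfg init code */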
+int slabtrace_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_memcfg_slabtrace_show, NULL);
+}
+
+#endif
+
#endif /* CONFIG_SLABINFO */