static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
int node;
+ struct kmem_cache_node *n;
if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
return;
pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
s->name);
- for_each_online_node(node) {
- struct kmem_cache_node *n = get_node(s, node);
+ for_each_kmem_cache_node(s, node, n) {
unsigned long nr_slabs;
unsigned long nr_objs;
unsigned long nr_free;
- if (!n)
- continue;
-
nr_free = count_partial(n, count_free);
nr_slabs = node_nr_slabs(n);
nr_objs = node_nr_objs(n);
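For reference, the iterator these hunks convert to lives in mm/slab.h (added by a companion patch in this series); roughly:

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterate over the nodes that actually have a kmem_cache_node
 * allocated; the body is skipped otherwise, which is why the explicit
 * "if (!n) continue;" checks can be dropped in the hunks below.
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))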
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
+ struct kmem_cache_node *n;
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = s->node[node];
-
- if (n)
- kmem_cache_free(kmem_cache_node, n);
-
+ for_each_kmem_cache_node(s, node, n) {
+ kmem_cache_free(kmem_cache_node, n);
s->node[node] = NULL;
}
}
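Net effect of the hunk above (reconstructed from the diff, not taken from the tree): the NULL test disappears because the iterator only yields allocated nodes.

static void free_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		kmem_cache_free(kmem_cache_node, n);	/* n is never NULL here */
		s->node[node] = NULL;
	}
}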
static inline int kmem_cache_close(struct kmem_cache *s)
{
int node;
+ struct kmem_cache_node *n;
flush_all(s);
/* Attempt to free all objects */
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
-
+ for_each_kmem_cache_node(s, node, n) {
free_partial(s, n);
if (n->nr_partial || slabs_node(s, node))
return 1;
return -ENOMEM;
flush_all(s);
- for_each_node_state(node, N_NORMAL_MEMORY) {
- n = get_node(s, node);
-
+ for_each_kmem_cache_node(s, node, n) {
if (!n->nr_partial)
continue;
{
int node;
struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+ struct kmem_cache_node *n;
memcpy(s, static_cache, kmem_cache->object_size);
* IPIs around.
*/
__flush_cpu_slab(s, smp_processor_id());
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
+ for_each_kmem_cache_node(s, node, n) {
struct page *p;
- if (n) {
- list_for_each_entry(p, &n->partial, lru)
- p->slab_cache = s;
+ list_for_each_entry(p, &n->partial, lru)
+ p->slab_cache = s;
#ifdef CONFIG_SLUB_DEBUG
- list_for_each_entry(p, &n->full, lru)
- p->slab_cache = s;
+ list_for_each_entry(p, &n->full, lru)
+ p->slab_cache = s;
#endif
- }
}
list_add(&s->list, &slab_caches);
return s;
unsigned long count = 0;
unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
sizeof(unsigned long), GFP_KERNEL);
+ struct kmem_cache_node *n;
if (!map)
return -ENOMEM;
flush_all(s);
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
-
+ for_each_kmem_cache_node(s, node, n)
count += validate_slab_node(s, n, map);
- }
kfree(map);
return count;
}
int node;
unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
sizeof(unsigned long), GFP_KERNEL);
+ struct kmem_cache_node *n;
if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_TEMPORARY)) {
/* Push back cpu slabs */
flush_all(s);
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
+ for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
struct page *page;
get_online_mems();
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
+ struct kmem_cache_node *n;
+
+ for_each_kmem_cache_node(s, node, n) {
if (flags & SO_TOTAL)
x = atomic_long_read(&n->total_objects);
} else
#endif
if (flags & SO_PARTIAL) {
- for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
+ struct kmem_cache_node *n;
+
+ for_each_kmem_cache_node(s, node, n) {
if (flags & SO_TOTAL)
x = count_partial(n, count_total);
else if (flags & SO_OBJECTS)
}
x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
- for_each_node_state(node, N_NORMAL_MEMORY)
+ for (node = 0; node < nr_node_ids; node++)
if (nodes[node])
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
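This emit loop is the one conversion that does not adopt the new helper, presumably because it walks the function-local nodes[] accumulator rather than s->node[]; entries for absent nodes stay zero and are skipped by the if, so a plain walk over all possible node IDs suffices. Reconstructed result:

#ifdef CONFIG_NUMA
	for (node = 0; node < nr_node_ids; node++)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
				     node, nodes[node]);
#endif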
static int any_slab_objects(struct kmem_cache *s)
{
int node;
+ struct kmem_cache_node *n;
- for_each_online_node(node) {
- struct kmem_cache_node *n = get_node(s, node);
-
- if (!n)
- continue;
-
+ for_each_kmem_cache_node(s, node, n)
if (atomic_long_read(&n->total_objects))
return 1;
- }
+
return 0;
}
#endif
unsigned long nr_objs = 0;
unsigned long nr_free = 0;
int node;
+ struct kmem_cache_node *n;
- for_each_online_node(node) {
- struct kmem_cache_node *n = get_node(s, node);
-
- if (!n)
- continue;
-
+ for_each_kmem_cache_node(s, node, n) {
nr_slabs += node_nr_slabs(n);
nr_objs += node_nr_objs(n);
nr_free += count_partial(n, count_free);