	atomic_inc(&page->_count);
}
+static inline struct page *virt_to_head_page(const void *x)
+{
+	struct page *page = virt_to_page(x);
+	return compound_head(page);
+}
+
/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
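(Usage sketch, not part of the patch itself: the new helper simply collapses the open-coded lookup of a compound page's head that the hunks below remove. Roughly, a caller that previously wrote

	struct page *page = compound_head(virt_to_page(obj));

can now write

	struct page *page = virt_to_head_page(obj);

with the same result for both normal and compound pages, which is exactly the conversion applied to the slab and slub call sites that follow.)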
static inline struct slab *page_get_slab(struct page *page)
{
-	page = compound_head(page);
	BUG_ON(!PageSlab(page));
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
-	struct page *page = virt_to_page(obj);
+	struct page *page = virt_to_head_page(obj);
	return page_get_slab(page);
}
	objp -= obj_offset(cachep);
	kfree_debugcheck(objp);
-	page = virt_to_page(objp);
+	page = virt_to_head_page(objp);
	slabp = page_get_slab(page);
	struct slab *slabp;
	unsigned objnr;
-	slabp = page_get_slab(virt_to_page(objp));
+	slabp = page_get_slab(virt_to_head_page(objp));
	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
	slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
{
	struct page * page;
-	page = virt_to_page(x);
-
-	page = compound_head(page);
+	page = virt_to_head_page(x);
	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
		set_tracking(s, x, TRACK_FREE);
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
-	struct page *page = compound_head(virt_to_page(x));
+	struct page *page = virt_to_head_page(x);
	if (!PageSlab(page))
		return NULL;
	if (!x)
		return;
-	page = compound_head(virt_to_page(x));
+	page = virt_to_head_page(x);
	s = page->slab;
		return NULL;
	}
-	page = compound_head(virt_to_page(p));
+	page = virt_to_head_page(p);
	new_cache = get_slab(new_size, flags);