*/
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
might_sleep_if(flags & __GFP_WAIT);
/*
 * Post-allocation hook: report a freshly allocated slab object to the
 * kmemcheck and kmemleak debugging facilities.
 *
 * NOTE(review): this is a hunk from a unified diff, not plain source —
 * the '+' line below is an addition introduced by the patch.
 */
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
+ flags &= gfp_allowed_mask; /* added by patch: mask off GFP bits not in gfp_allowed_mask before handing flags to the debug hooks */
	/* presumably tells kmemcheck the object's bytes are now allocated — TODO confirm against kmemcheck API */
kmemcheck_slab_alloc(s, flags, object, s->objsize);
	/* register the object with kmemleak so it is tracked for leak detection */
kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}
goto load_freelist;
}
+ gfpflags &= gfp_allowed_mask;
if (gfpflags & __GFP_WAIT)
local_irq_enable();
struct kmem_cache_cpu *c;
unsigned long flags;
- gfpflags &= gfp_allowed_mask;
-
if (slab_pre_alloc_hook(s, gfpflags))
return NULL;