 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, in which case they are
 * BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
- * Note that this flag disables some debug features.
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
*/
-#define ARCH_KMALLOC_MINALIGN 0
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
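
/*
 * A minimal user-space sketch (not part of the patch, not kernel code)
 * of why __alignof__(unsigned long long) is the right lower bound here:
 * on 32-bit targets a machine word is 4 bytes, yet the ABI may still
 * require or prefer 8-byte alignment for 64-bit integers, which is what
 * the redzone words below become.
 */
#include <stdio.h>

int main(void)
{
	/* BYTES_PER_WORD in slab.c is sizeof(void *) */
	printf("sizeof(void *)                  = %zu\n", sizeof(void *));
	printf("__alignof__(unsigned long long) = %zu\n",
	       (size_t)__alignof__(unsigned long long));
	return 0;
}
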
#ifndef ARCH_SLAB_MINALIGN
return cachep->obj_size;
}
-static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
+ return (unsigned long long*) (objp + obj_offset(cachep) -
+ sizeof(unsigned long long));
}
-static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long *)(objp + cachep->buffer_size -
- 2 * BYTES_PER_WORD);
- return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
+ return (unsigned long long *)(objp + cachep->buffer_size -
+ sizeof(unsigned long long) -
+ BYTES_PER_WORD);
+ return (unsigned long long *) (objp + cachep->buffer_size -
+ sizeof(unsigned long long));
}
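
/*
 * A user-space sketch (illustrative only, stand-in sizes) of the layout
 * the two helpers above index into when SLAB_RED_ZONE and
 * SLAB_STORE_USER are both set:
 *
 *   | redzone1 (u64) | object ... | redzone2 (u64) | caller (word) |
 *   ^ objp           ^ objp + obj_offset(cachep)
 *
 * The pointer arithmetic mirrors dbg_redzone1()/dbg_redzone2().
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	char buf[64];			/* stand-in for one object buffer */
	size_t obj_offset = sizeof(unsigned long long);
	size_t buffer_size = sizeof(buf);

	unsigned long long *rz1 = (unsigned long long *)
		(buf + obj_offset - sizeof(unsigned long long));
	unsigned long long *rz2 = (unsigned long long *)
		(buf + buffer_size - sizeof(unsigned long long)
		 - sizeof(void *));	/* BYTES_PER_WORD, for the caller */

	assert((char *)rz1 == buf);	/* redzone1 leads the object */
	printf("rz1 at %td, rz2 at %td, user word at %zu\n",
	       (char *)rz1 - buf, (char *)rz2 - buf,
	       buffer_size - sizeof(void *));
	return 0;
}
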
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#define obj_offset(x) 0
#define obj_size(cachep) (cachep->buffer_size)
-#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
-#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
+#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
#endif
char *realobj;
if (cachep->flags & SLAB_RED_ZONE) {
- printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
+ printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
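
/*
 * Why the "%lx" -> "%llx" changes in this patch are required and not
 * cosmetic: the redzone words are now unsigned long long, and on 32-bit
 * kernels "long" is 32 bits, so "%lx" would consume only half of each
 * argument and garble the report.  A stand-alone illustration:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long rz = 0x09f911029d74e35bULL;

	printf("0x%llx\n", rz);	/* correct for unsigned long long */
	/* "0x%lx" here would be undefined where long is 32 bits */
	return 0;
}
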
* is greater than BYTES_PER_WORD.
*/
if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
- ralign = BYTES_PER_WORD;
+ ralign = __alignof__(unsigned long long);
/* 2) arch mandated alignment */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < align) {
		ralign = align;
	}
/* disable debug if necessary */
- if (ralign > BYTES_PER_WORD)
+ if (ralign > __alignof__(unsigned long long))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
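
/*
 * A condensed user-space sketch of the negotiation above (names and
 * flag values are local stand-ins, not the kernel API): the debug
 * minimum is raised to the arch and caller minimums, and if the result
 * outgrows the redzone word alignment the debug layout can no longer
 * be honoured, so the flags are dropped.
 */
#include <stdio.h>

#define MY_RED_ZONE	0x1UL
#define MY_STORE_USER	0x2UL

static unsigned long pick_align(unsigned long *flags,
				unsigned long arch_min, unsigned long caller)
{
	unsigned long ralign = 1;

	if (*flags & (MY_RED_ZONE | MY_STORE_USER))
		ralign = __alignof__(unsigned long long);
	if (ralign < arch_min)		/* 2) arch mandated alignment */
		ralign = arch_min;
	if (ralign < caller)		/* 3) caller mandated alignment */
		ralign = caller;
	if (ralign > __alignof__(unsigned long long))
		*flags &= ~(MY_RED_ZONE | MY_STORE_USER);
	return ralign;
}

int main(void)
{
	unsigned long flags = MY_RED_ZONE | MY_STORE_USER;
	/* a caller demanding 64-byte alignment loses redzoning */
	unsigned long align = pick_align(&flags, 8, 64);

	printf("align=%lu flags=%#lx\n", align, flags);
	return 0;
}
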
/*
* 4) Store it.
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
- cachep->obj_offset += BYTES_PER_WORD;
- size += 2 * BYTES_PER_WORD;
+ cachep->obj_offset += sizeof(unsigned long long);
+ size += 2 * sizeof(unsigned long long);
}
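
/*
 * Worked example with illustrative numbers: for a 32-byte object with
 * SLAB_RED_ZONE set and 8-byte redzone words, obj_offset grows from 0
 * to 8 and size from 32 to 48 bytes:
 *
 *   | redzone1: 8 | object: 32 | redzone2: 8 |
 *
 * If SLAB_STORE_USER is also set, the block below appends one further
 * BYTES_PER_WORD slot for the caller address.
 */
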
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
- unsigned long redzone1, redzone2;
+ unsigned long long redzone1, redzone2;
redzone1 = *dbg_redzone1(cache, obj);
redzone2 = *dbg_redzone2(cache, obj);
	/* Redzone is ok */
	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
slab_error(cache, "memory outside object was overwritten");
- printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+ printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
obj, redzone1, redzone2);
}
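
/*
 * A self-contained user-space sketch of the check above.  The magic
 * values are stand-ins modelled on the kernel's RED_ACTIVE and
 * RED_INACTIVE poison patterns (see include/linux/poison.h); the exact
 * constants are an assumption here.
 */
#include <stdio.h>

#define MY_RED_ACTIVE	0xd84156c5635688c0ULL	/* object in use */
#define MY_RED_INACTIVE	0x09f911029d74e35bULL	/* object is free */

static void check_free(unsigned long long rz1, unsigned long long rz2)
{
	if (rz1 == MY_RED_ACTIVE && rz2 == MY_RED_ACTIVE)
		return;					/* redzone intact */
	if (rz1 == MY_RED_INACTIVE && rz2 == MY_RED_INACTIVE)
		printf("double free detected\n");
	else
		printf("memory outside object was overwritten\n");
	printf("redzone 1:0x%llx, redzone 2:0x%llx\n", rz1, rz2);
}

int main(void)
{
	check_free(MY_RED_INACTIVE, MY_RED_INACTIVE);	/* freed twice */
	check_free(MY_RED_ACTIVE, 0xdeadbeefULL);	/* tail overrun */
	return 0;
}
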
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
- "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+ "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}