mm: Rename SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Wed, 18 Jan 2017 10:53:44 +0000 (02:53 -0800)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 18 Apr 2017 18:42:36 +0000 (11:42 -0700)
A group of Linux kernel hackers reported chasing a bug that resulted
from their assumption that SLAB_DESTROY_BY_RCU provided an existence
guarantee, that is, that no block from such a slab would be reallocated
during an RCU read-side critical section.  Of course, that is not the
case.  Instead, SLAB_DESTROY_BY_RCU only prevents freeing of an entire
slab of blocks.

However, there is a phrase for this, namely "type safety".  This commit
therefore renames SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU in order
to avoid future instances of this sort of confusion.
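To illustrate the guarantee that is provided: a reader must acquire a
reference and then revalidate the object's identity, because the object
can be freed and immediately reallocated as another object of the same
type at any point in the RCU read-side critical section.  A minimal
sketch of the pattern (lookup(), obj_put(), ->refcnt, and ->key are
illustrative placeholders, not any particular subsystem's API):

	/* Illustrative pattern; lookup() and obj_put() are placeholders. */
	rcu_read_lock();
	obj = lookup(key);
	if (obj && !atomic_inc_not_zero(&obj->refcnt))
		obj = NULL;		/* being freed: treat as not found */
	if (obj && obj->key != key) {	/* freed and reused: not our object */
		obj_put(obj);
		obj = NULL;
	}
	rcu_read_unlock();

The net/llc and net/netfilter hunks below show this same
acquire-then-revalidate pattern in tree.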

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
[ paulmck: Add comments mentioning the old name, as requested by Eric
  Dumazet, in order to help people familiar with the old name find
  the new one. ]
Acked-by: David Rientjes <rientjes@google.com>
30 files changed:
Documentation/RCU/00-INDEX
Documentation/RCU/rculist_nulls.txt
Documentation/RCU/whatisRCU.txt
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
fs/jbd2/journal.c
fs/signalfd.c
include/linux/dma-fence.h
include/linux/slab.h
include/net/sock.h
kernel/fork.c
kernel/signal.c
mm/kasan/kasan.c
mm/kmemcheck.c
mm/rmap.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c
net/llc/af_llc.c
net/llc/llc_conn.c
net/llc/llc_sap.c
net/netfilter/nf_conntrack_core.c
net/smc/af_smc.c

index f773a264ae02918ef6b79f002ff17d0bd4e5df65..1672573b037a73ebe4b169f7044e525b7a4bf246 100644 (file)
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -17,7 +17,7 @@ rcu_dereference.txt
 rcubarrier.txt
        - RCU and Unloadable Modules
 rculist_nulls.txt
-       - RCU list primitives for use with SLAB_DESTROY_BY_RCU
+       - RCU list primitives for use with SLAB_TYPESAFE_BY_RCU
 rcuref.txt
        - Reference-count design for elements of lists/arrays protected by RCU
 rcu.txt
index 18f9651ff23d411e96737ec070d4fc6bc29c50fe..8151f0195f7688386e8057ad34fdc9cc153b7fb8 100644 (file)
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -1,5 +1,5 @@
 Using hlist_nulls to protect read-mostly linked lists and
-objects using SLAB_DESTROY_BY_RCU allocations.
+objects using SLAB_TYPESAFE_BY_RCU allocations.
 
 Please read the basics in Documentation/RCU/listRCU.txt
 
@@ -7,7 +7,7 @@ Using special markers (called 'nulls') is a convenient way
 to solve the following problem:
 
 A typical RCU linked list managing objects which are
-allocated with SLAB_DESTROY_BY_RCU kmem_cache can
+allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can
 use the following algos:
 
 1) Lookup algo
@@ -96,7 +96,7 @@ unlock_chain(); // typically a spin_unlock()
 3) Remove algo
 --------------
 Nothing special here, we can use a standard RCU hlist deletion.
-But thanks to SLAB_DESTROY_BY_RCU, beware a deleted object can be reused
+But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused
 very very fast (before the end of the RCU grace period)
 
 if (put_last_reference_on(obj)) {
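The hunk's trailing context stops mid-example; the removal path in this
document continues roughly as follows (a sketch reusing the document's
own lock_chain()/unlock_chain() placeholders):

if (put_last_reference_on(obj)) {
   lock_chain(); // typically a spin_lock()
   hlist_nulls_del_rcu(&obj->obj_node);
   unlock_chain(); // typically a spin_unlock()
   kmem_cache_free(cachep, obj); // object may be reused immediately
}

Note that no call_rcu() deferral is needed before kmem_cache_free():
type safety plus the lookup-side revalidation makes immediate reuse
legal.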
index 5cbd8b2395b811489c68acb7da4616b4a1ef9523..91c912e86915115d97e3ca295147f7e9fc748379 100644 (file)
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -925,7 +925,8 @@ d.  Do you need RCU grace periods to complete even in the face
 
 e.     Is your workload too update-intensive for normal use of
        RCU, but inappropriate for other synchronization mechanisms?
-       If so, consider SLAB_DESTROY_BY_RCU.  But please be careful!
+       If so, consider SLAB_TYPESAFE_BY_RCU (which was originally
+       named SLAB_DESTROY_BY_RCU).  But please be careful!
 
 f.     Do you need read-side critical sections that are respected
        even though they are in the middle of the idle loop, during
index 6908123162d17cd998c1e7f0bf54a27064e67588..3b668895ac24e9c1e072a6ac461b18167ff7bb1b 100644 (file)
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4552,7 +4552,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
        dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
                                        SLAB_HWCACHE_ALIGN |
                                        SLAB_RECLAIM_ACCOUNT |
-                                       SLAB_DESTROY_BY_RCU);
+                                       SLAB_TYPESAFE_BY_RCU);
        if (!dev_priv->requests)
                goto err_vmas;
 
index ea511f06efaf52b6e1faa806bdf5f7730b72d3b1..9ee2750e1dde5b7181f9c8a537ed5b0c4a544cb8 100644 (file)
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -493,7 +493,7 @@ static inline struct drm_i915_gem_request *
 __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 {
        /* Performing a lockless retrieval of the active request is super
-        * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+        * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
         * slab of request objects will not be freed whilst we hold the
         * RCU read lock. It does not guarantee that the request itself
         * will not be freed and then *reused*. Viz,
index 12647af5a33691c798f62e5620944063f535a110..e7fb47e84a93cf7dfcb948c73aeccd8e52423c52 100644 (file)
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -1071,7 +1071,7 @@ int ldlm_init(void)
        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                                           sizeof(struct ldlm_lock), 0,
                                           SLAB_HWCACHE_ALIGN |
-                                          SLAB_DESTROY_BY_RCU, NULL);
+                                          SLAB_TYPESAFE_BY_RCU, NULL);
        if (!ldlm_lock_slab) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
index a1a359bfcc9cd4ff84254e464788ab3031dfe90f..7f8f962454e52510d464ed55502e32bbe9d9f3cf 100644 (file)
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2340,7 +2340,7 @@ static int jbd2_journal_init_journal_head_cache(void)
        jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
-                               SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU,
+                               SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
                                NULL);          /* ctor */
        retval = 0;
        if (!jbd2_journal_head_cache) {
index 270221fcef42cc42fcfdbc098b587b571be65a12..7e3d71109f51334d2bb8c71a9d2419e6ca4eeb3f 100644 (file)
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -38,7 +38,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
        /*
         * The lockless check can race with remove_wait_queue() in progress,
         * but in this case its caller should run under rcu_read_lock() and
-        * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
+        * sighand_cachep is SLAB_TYPESAFE_BY_RCU, so we can safely return.
         */
        if (likely(!waitqueue_active(wqh)))
                return;
index 6048fa404e571165b40a7964cb1408f413bba9f0..a5195a7d6f77e40d23d29ba2793c7b393f4eb0bb 100644 (file)
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
  *
  * Returns the fence on success, or NULL if no refcount could be obtained.
  * This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
                 * have successfully acquired a reference to it. If it no
                 * longer matches, we are holding a reference to some other
                 * reallocated pointer. This is possible if the allocator
-                * is using a freelist like SLAB_DESTROY_BY_RCU where the
+                * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
                 * fence remains valid for the RCU grace period, but it
                 * may be reallocated. When using such allocators, we are
                 * responsible for ensuring the reference we get is to
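For reference, the retry loop this comment describes amounts to the
following (a condensed sketch of the logic, not the verbatim
implementation):

	/* Condensed sketch of dma_fence_get_rcu_safe()'s logic. */
	struct dma_fence *fence;

	do {
		fence = rcu_dereference(*fencep);
		if (!fence || !dma_fence_get_rcu(fence))
			return NULL;	/* no fence, or refcount was zero */

		/* If the pointer is unchanged, our reference is to the
		 * fence the caller asked about rather than to a reused
		 * object of the same type, so hand it off. */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		dma_fence_put(fence);	/* fence was reallocated: retry */
	} while (1);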
index 3c37a8c5192159c88c892779ffa71c17d23b7736..04a7f7993e678d6454b6efaa608db3b88ea78eec 100644 (file)
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -28,7 +28,7 @@
 #define SLAB_STORE_USER                0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_PANIC             0x00040000UL    /* Panic if kmem_cache_create() fails */
 /*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
  * This delays freeing the SLAB page by a grace period; it does _NOT_
  * delay object freeing. This means that if you do kmem_cache_free()
  *
  * rcu_read_lock before reading the address, then rcu_read_unlock after
  * taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_DESTROY_BY_RCU    0x00080000UL    /* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU   0x00080000UL    /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD                0x00100000UL    /* Spread some memory over cpuset */
 #define SLAB_TRACE             0x00200000UL    /* Trace allocations and frees */
 
index 5e5997654db6454f82179cc35c4bc22e89d0c06f..59cdccaa30e7450c147d518ea90c8aa60cea744d 100644 (file)
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -993,7 +993,7 @@ struct smc_hashinfo;
 struct module;
 
 /*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls nodes
  * unmodified. Special care is taken when initializing objects to zero.
  */
 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
index 6c463c80e93de8c3be3180f3cbd8694b955a1ac3..9330ce24f1bbb01adb0927b262c4d7e3a36e6660 100644 (file)
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1313,7 +1313,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
        if (atomic_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
-                * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+                * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
                 * without an RCU grace period, see __lock_task_sighand().
                 */
                kmem_cache_free(sighand_cachep, sighand);
@@ -2144,7 +2144,7 @@ void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
                        SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
index 7e59ebc2c25e669ef60f54d6e3ae6839c34d2f2a..6df5f72158e436c2697075213aad28acc0d86f41 100644 (file)
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1237,7 +1237,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                }
                /*
                 * This sighand may already have been freed and even reused, but
-                * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
+                * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
                 * initializes ->siglock: this slab can't go away, it has
                 * the same object type, ->siglock can't be reinitialized.
                 *
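The retry loop surrounding this comment (elided by the hunk) is what
turns type safety into a usable lock: acquire ->siglock, then recheck
that the sighand still belongs to the task.  A condensed sketch:

	/* Condensed sketch of __lock_task_sighand()'s retry loop. */
	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(!sighand))
			break;
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_dereference(tsk->sighand)))
			break;	/* still the task's sighand; keep the lock */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();
	return sighand;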
index 98b27195e38b07fc1b6e20c1f9c49db9bc112303..4b20061102f62423f1cf539ad675971503624042 100644 (file)
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -413,7 +413,7 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
        *size += sizeof(struct kasan_alloc_meta);
 
        /* Add free meta. */
-       if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+       if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
@@ -561,7 +561,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
 
        /* RCU slabs could be legally used after free within the RCU period */
-       if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return;
 
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
@@ -572,7 +572,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
        s8 shadow_byte;
 
        /* RCU slabs could be legally used after free within the RCU period */
-       if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;
 
        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
index 5bf191756a4a07b04ffe1dd792f475e1e0af0492..2d5959c5f7c50469ca3d59da9c62c6c2dd932917 100644 (file)
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -95,7 +95,7 @@ void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
 {
        /* TODO: RCU freeing is unsupported for now; hide false positives. */
-       if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+       if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
                kmemcheck_mark_freed(object, size);
 }
 
index 49ed681ccc7b01d5e2a73b48b62a1da4ac9731f2..8ffd59df8a3fbc61207d87e969e2fae6bd5bab19 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-                       0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+                       0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
                        anon_vma_ctor);
        anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
                        SLAB_PANIC|SLAB_ACCOUNT);
@@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
         * If this page is still mapped, then its anon_vma cannot have been
         * freed.  But if it has been unmapped, we have no security against the
         * anon_vma structure being freed and reused (for another anon_vma:
-        * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+        * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
         * above cannot corrupt).
         */
        if (!page_mapped(page)) {
index 807d86c769088681b47f41c0cc0a721307bd16c1..93c827864862f7891d160f12892ab90537fb7989 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
 
        freelist = page->freelist;
        slab_destroy_debugcheck(cachep, page);
-       if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+       if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
                call_rcu(&page->rcu_head, kmem_rcu_free);
        else
                kmem_freepages(cachep, page);
@@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
 
        cachep->num = 0;
 
-       if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+       if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
                return false;
 
        left = calculate_slab_order(cachep, size,
@@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
        if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
                                                2 * sizeof(unsigned long long)))
                flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
-       if (!(flags & SLAB_DESTROY_BY_RCU))
+       if (!(flags & SLAB_TYPESAFE_BY_RCU))
                flags |= SLAB_POISON;
 #endif
 #endif
index 65e7c3fcac72790acece0ac140d864151f95f166..9cfcf099709c19cfc8b5070325a0527c763eddaa 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -126,7 +126,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+                        SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -415,7 +415,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
         * back there or track user information then we can
         * only use the space before that information.
         */
-       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+       if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
index 09d0e849b07f47d82f5d9a5cda4862517d297cb2..01a0fe2eb33267f8f04f7e90fd79358cd1f41d07 100644 (file)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -39,7 +39,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  * Set of flags that will prevent slab merging
  */
 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-               SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+               SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
@@ -500,7 +500,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
        struct kmem_cache *s, *s2;
 
        /*
-        * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the
+        * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
         * @slab_caches_to_rcu_destroy list.  The slab pages are freed
         * through RCU and the associated kmem_cache is dereferenced
         * while freeing the pages, so the kmem_caches should be freed only
@@ -537,7 +537,7 @@ static int shutdown_cache(struct kmem_cache *s)
        memcg_unlink_cache(s);
        list_del(&s->list);
 
-       if (s->flags & SLAB_DESTROY_BY_RCU) {
+       if (s->flags & SLAB_TYPESAFE_BY_RCU) {
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
index eac04d4357ec6b8d653de4c30c96ffdd97974462..1bae78d71096ad26bbe4575f631f3fad7b694388 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,7 +126,7 @@ static inline void clear_slob_page_free(struct page *sp)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
- * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
  * the block using call_rcu.
  */
 struct slob_rcu {
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-       if (flags & SLAB_DESTROY_BY_RCU) {
+       if (flags & SLAB_TYPESAFE_BY_RCU) {
                /* leave room for rcu footer at the end of object */
                c->size += sizeof(struct slob_rcu);
        }
@@ -598,7 +598,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
        kmemleak_free_recursive(b, c->flags);
-       if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+       if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                slob_rcu->size = c->size;
index 7f4bc7027ed53536efaaf5663f007bd2442de503..57e5156f02be6bcc23e70ec801e9cc1c3bbdd631 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1687,7 +1687,7 @@ static void rcu_free_slab(struct rcu_head *h)
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
-       if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
+       if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
                struct rcu_head *head;
 
                if (need_reserve_slab_rcu) {
@@ -2963,7 +2963,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
         * slab_free_freelist_hook() could have put the items into quarantine.
         * If so, no need to free them.
         */
-       if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+       if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
                return;
        do_slab_free(s, page, head, tail, cnt, addr);
 }
@@ -3433,7 +3433,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         * the slab may touch the object after free or before allocation
         * then we should never poison the object itself.
         */
-       if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
+       if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
                        !s->ctor)
                s->flags |= __OBJECT_POISON;
        else
@@ -3455,7 +3455,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
         */
        s->inuse = size;
 
-       if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+       if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
                s->ctor)) {
                /*
                 * Relocate free pointer after the object if it is not
@@ -3537,7 +3537,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
        s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
        s->reserved = 0;
 
-       if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+       if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
                s->reserved = sizeof(struct rcu_head);
 
        if (!calculate_sizes(s, -1))
@@ -5042,7 +5042,7 @@ SLAB_ATTR_RO(cache_dma);
 
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
index 409d0cfd34474812c3bf74f26cd423a3d65ee441..90210a0e388853f1ec4bcaced43b846e0e7a3b13 100644 (file)
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -950,7 +950,7 @@ static struct proto dccp_v4_prot = {
        .orphan_count           = &dccp_orphan_count,
        .max_header             = MAX_DCCP_HEADER,
        .obj_size               = sizeof(struct dccp_sock),
-       .slab_flags             = SLAB_DESTROY_BY_RCU,
+       .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .rsk_prot               = &dccp_request_sock_ops,
        .twsk_prot              = &dccp_timewait_sock_ops,
        .h.hashinfo             = &dccp_hashinfo,
index 233b57367758c64c09ed40f7359cb8fcb1918d93..b4019a5e455142244bc684d034f5c397dc58863a 100644 (file)
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1012,7 +1012,7 @@ static struct proto dccp_v6_prot = {
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
-       .slab_flags        = SLAB_DESTROY_BY_RCU,
+       .slab_flags        = SLAB_TYPESAFE_BY_RCU,
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
        .h.hashinfo        = &dccp_hashinfo,
index 9a89b8deafae1e9b2e8d1d9bc211c9c30b8dd8ec..82c89abeb9893670c3bae99de6c6084ae85c5b71 100644 (file)
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2398,7 +2398,7 @@ struct proto tcp_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
-       .slab_flags             = SLAB_DESTROY_BY_RCU,
+       .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp_timewait_sock_ops,
        .rsk_prot               = &tcp_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
index 60a5295a7de6e877f5ab80ef32314c573c289d81..bdbc4327ebeedaf46640a8adfeb416fb09afab69 100644 (file)
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1919,7 +1919,7 @@ struct proto tcpv6_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
-       .slab_flags             = SLAB_DESTROY_BY_RCU,
+       .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
index 06186d608a274eb46cd768610c67e8a5a8e84c15..d096ca5630543304277a243aa200ecf174dd7f03 100644 (file)
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -142,7 +142,7 @@ static struct proto llc_proto = {
        .name     = "LLC",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct llc_sock),
-       .slab_flags = SLAB_DESTROY_BY_RCU,
+       .slab_flags = SLAB_TYPESAFE_BY_RCU,
 };
 
 /**
index 8bc5a1bd2d453542df31506f543feb64b64cdd96..9b02c13d258b005bb10029b3baf8ec4db71f18b4 100644 (file)
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -506,7 +506,7 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
 again:
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_estab_match(sap, daddr, laddr, rc)) {
-                       /* Extra checks required by SLAB_DESTROY_BY_RCU */
+                       /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
                        if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
@@ -565,7 +565,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
 again:
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_listener_match(sap, laddr, rc)) {
-                       /* Extra checks required by SLAB_DESTROY_BY_RCU */
+                       /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
                        if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
index 5404d0d195cc581613e356b75bd70321e617673e..63b6ab0563705f4b15c6bc8e3d9c7ad85a2af381 100644 (file)
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -328,7 +328,7 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
 again:
        sk_nulls_for_each_rcu(rc, node, laddr_hb) {
                if (llc_dgram_match(sap, laddr, rc)) {
-                       /* Extra checks required by SLAB_DESTROY_BY_RCU */
+                       /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
                        if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
                                goto again;
                        if (unlikely(llc_sk(rc)->sap != sap ||
index 071b97fcbefb083ded417e06e739a4622b237fe8..fdcdac7916b2205cf6b7a278d695644fec2a100b 100644 (file)
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -914,7 +914,7 @@ static unsigned int early_drop_list(struct net *net,
                        continue;
 
                /* kill only if still in same netns -- might have moved due to
-                * SLAB_DESTROY_BY_RCU rules.
+                * SLAB_TYPESAFE_BY_RCU rules.
                 *
                 * We steal the timer reference.  If that fails, the timer has
                 * already fired or someone else deleted it. Just drop ref
@@ -1069,7 +1069,7 @@ __nf_conntrack_alloc(struct net *net,
 
        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
-        * SLAB_DESTROY_BY_RCU.
+        * SLAB_TYPESAFE_BY_RCU.
         */
        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL)
@@ -1114,7 +1114,7 @@ void nf_conntrack_free(struct nf_conn *ct)
        struct net *net = nf_ct_net(ct);
 
        /* A freed object has refcnt == 0, that's
-        * the golden rule for SLAB_DESTROY_BY_RCU
+        * the golden rule for SLAB_TYPESAFE_BY_RCU
         */
        NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
 
@@ -1878,7 +1878,7 @@ int nf_conntrack_init_start(void)
        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                NFCT_INFOMASK + 1,
-                                               SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+                                               SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
        if (!nf_conntrack_cachep)
                goto err_cachep;
 
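The lookup side, __nf_conntrack_find_get(), shows the golden rule in
action: take a reference only via atomic_inc_not_zero(), then re-verify
the tuple and netns in case the entry was freed and reused in the
meantime.  A condensed paraphrase (the in-tree code also compares the
conntrack zone):

	/* Condensed paraphrase of __nf_conntrack_find_get(). */
	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			goto begin;	/* lost the race with a free */

		/* Freed and reused for another connection? Retry. */
		if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
			     !net_eq(net, nf_ct_net(ct)))) {
			nf_ct_put(ct);
			goto begin;
		}
	}
	rcu_read_unlock();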
index 85837ab90e8916e612d5dd0a21ef48c5e2c9e544..d34bbd6d8f38c6f1ea7db0f9fefd84d989c58ca6 100644 (file)
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -101,7 +101,7 @@ struct proto smc_proto = {
        .unhash         = smc_unhash_sk,
        .obj_size       = sizeof(struct smc_sock),
        .h.smc_hash     = &smc_v4_hashinfo,
-       .slab_flags     = SLAB_DESTROY_BY_RCU,
+       .slab_flags     = SLAB_TYPESAFE_BY_RCU,
 };
 EXPORT_SYMBOL_GPL(smc_proto);