kmemcheck: remove annotations
author: Levin, Alexander (Sasha Levin) <alexander.levin@verizon.com>
Thu, 16 Nov 2017 01:35:51 +0000 (17:35 -0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 Feb 2018 14:42:23 +0000 (15:42 +0100)
commit 4950276672fce5c241857540f8561c440663673d upstream.

Patch series "kmemcheck: kill kmemcheck", v2.

As discussed at LSF/MM, kill kmemcheck.

KASan is a replacement that works without the limitations of kmemcheck
(single CPU only, slow).  KASan is already upstream.

We are also not aware of any users of kmemcheck (or of users who don't
consider KASan a suitable replacement).

The only objection was that, since KASAN wasn't supported by all GCC
versions provided by distros at that time, we should hold off for 2
years and try again.

Now that 2 years have passed and all distros provide a gcc that
supports KASAN, kill kmemcheck again for the very same reasons.

This patch (of 4):

Remove kmemcheck annotations and calls to kmemcheck from the kernel.
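
For reference, the annotation pattern being deleted looks roughly like the
sketch below.  This is a minimal illustration modeled on the c2port hunks in
this series, using the pre-removal <linux/kmemcheck.h> API; the struct and
function names are made up for the example and do not exist in the tree.
Once the annotations are gone, the fields simply remain plain C bitfields
with no bracketing macros around them.

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Bitfields were bracketed so kmemcheck could track the group as one unit. */
struct example_device {
        kmemcheck_bitfield_begin(flags);
        unsigned int access:1;
        unsigned int flash_access:1;
        kmemcheck_bitfield_end(flags);
        int id;
};

static struct example_device *example_device_alloc(void)
{
        struct example_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

        /* Mark the bracketed field group right after allocation. */
        if (dev)
                kmemcheck_annotate_bitfield(dev, flags);
        return dev;
}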

[alexander.levin@verizon.com: correctly remove kmemcheck call from dma_map_sg_attrs]
Link: http://lkml.kernel.org/r/20171012192151.26531-1-alexander.levin@verizon.com
Link: http://lkml.kernel.org/r/20171007030159.22241-2-alexander.levin@verizon.com
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Hansen <devtimhansen@gmail.com>
Cc: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
34 files changed:
arch/arm/include/asm/dma-iommu.h
arch/openrisc/include/asm/dma-mapping.h
arch/x86/Makefile
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/xor.h
arch/x86/kernel/traps.c
arch/x86/mm/fault.c
drivers/char/random.c
drivers/misc/c2port/core.c
fs/dcache.c
include/linux/c2port.h
include/linux/dma-mapping.h
include/linux/filter.h
include/linux/mm_types.h
include/linux/net.h
include/linux/ring_buffer.h
include/linux/skbuff.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/sock.h
init/main.c
kernel/bpf/core.c
kernel/locking/lockdep.c
kernel/trace/ring_buffer.c
mm/kmemleak.c
mm/page_alloc.c
mm/slab.c
mm/slab.h
mm/slub.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/inet_timewait_sock.c
net/ipv4/tcp_input.c
net/socket.c

index 0722ec6be692381cee16f3f2ea4e8c32ce05677d..6821f1249300d99ee165d53053880ade369b0d78 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,7 +7,6 @@
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
 #include <linux/kref.h>
 
 #define ARM_MAPPING_ERROR              (~(dma_addr_t)0x0)
index f41bd3cb76d905f05c2e977044d121de53d56798..e212a1f0b6d25534e276e043c72826d496e2136f 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -23,7 +23,6 @@
  */
 
 #include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
 #include <linux/dma-mapping.h>
 
 extern const struct dma_map_ops or1k_dma_map_ops;
index 504b1a4535acaa252dedc6e6cf8f9031c2caf2f8..fad55160dcb94a28e60d537d3d69d471a1e10e2e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -158,11 +158,6 @@ ifdef CONFIG_X86_X32
 endif
 export CONFIG_X86_X32_ABI
 
-# Don't unroll struct assignments with kmemcheck enabled
-ifeq ($(CONFIG_KMEMCHECK),y)
-       KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
-endif
-
 #
 # If the function graph tracer is used with mcount instead of fentry,
 # '-maccumulate-outgoing-args' is needed to prevent a GCC bug
index 836ca1178a6afe552258357fecf9b6dbc8102356..69f16f0729d01efb12018c05760380f418647ae8 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,7 +7,6 @@
  * Documentation/DMA-API.txt for documentation.
  */
 
-#include <linux/kmemcheck.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <asm/io.h>
index 1f5c5161ead682664dc30fc5dda802de2de0bc4b..45c8605467f137b78041e2530c81a282829b1bd1 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,7 +1,4 @@
-#ifdef CONFIG_KMEMCHECK
-/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
-# include <asm-generic/xor.h>
-#elif !defined(_ASM_X86_XOR_H)
+#ifndef _ASM_X86_XOR_H
 #define _ASM_X86_XOR_H
 
 /*
index 0ddaeb2184a6f0b3ee43d1c4798df394e0029f94..a66428dc92ae0fdbdbf810e4590428302e2d568f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -42,7 +42,6 @@
 #include <linux/edac.h>
 #endif
 
-#include <asm/kmemcheck.h>
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
 #include <asm/debugreg.h>
@@ -764,10 +763,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        if (!dr6 && user_mode(regs))
                user_icebp = 1;
 
-       /* Catch kmemcheck conditions! */
-       if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
-               goto exit;
-
        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;
 
index b264b590eeec0788161ab872e392410960025369..9150fe2c9b2697c9119725012cd75bb902a11e1a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,7 +20,6 @@
 #include <asm/cpufeature.h>            /* boot_cpu_has, ...            */
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
-#include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
 #include <asm/fixmap.h>                        /* VSYSCALL_ADDR                */
 #include <asm/vsyscall.h>              /* emulate_vsyscall             */
 #include <asm/vm86.h>                  /* struct vm86                  */
@@ -1257,8 +1256,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
         * Detect and handle instructions that would cause a page fault for
         * both a tracked kernel page and a userspace page.
         */
-       if (kmemcheck_active(regs))
-               kmemcheck_hide(regs);
        prefetchw(&mm->mmap_sem);
 
        if (unlikely(kmmio_fault(regs, address)))
@@ -1281,9 +1278,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
                if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
                        if (vmalloc_fault(address) >= 0)
                                return;
-
-                       if (kmemcheck_fault(regs, address, error_code))
-                               return;
                }
 
                /* Can handle a stale RO->RW TLB: */
index 8ad92707e45f23b890203d5c5468d47473acf636..ea0115cf5fc0d01faa9b20fa2f1c8dd690a9ab80 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
 #include <linux/ptrace.h>
-#include <linux/kmemcheck.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
 #include <linux/syscalls.h>
index 1922cb8f6b88f3d408439cc6dd0fda8b5af54092..1c5b7aec13d46a288ba6659d7ae9e3ad8b17da13 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,7 +15,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
-#include <linux/kmemcheck.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
 #include <linux/idr.h>
@@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
                return ERR_PTR(-EINVAL);
 
        c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
-       kmemcheck_annotate_bitfield(c2dev, flags);
        if (unlikely(!c2dev))
                return ERR_PTR(-ENOMEM);
 
index 34c852af215c0f1ff2d73800ed0bc02c9a2f0794..b8d999a5768bfeeb481ef31193d5d114bf2d089a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2705,8 +2705,6 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
                         */
                        unsigned int i;
                        BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
-                       kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
-                       kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
                        for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
                                swap(((long *) &dentry->d_iname)[i],
                                     ((long *) &target->d_iname)[i]);
index 4efabcb5134712db3a2be5994d1fada80911eb11..f2736348ca26e70eb72b4e8150639371bd514821 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -9,8 +9,6 @@
  * the Free Software Foundation
  */
 
-#include <linux/kmemcheck.h>
-
 #define C2PORT_NAME_LEN                        32
 
 struct device;
@@ -22,10 +20,8 @@ struct device;
 /* Main struct */
 struct c2port_ops;
 struct c2port_device {
-       kmemcheck_bitfield_begin(flags);
        unsigned int access:1;
        unsigned int flash_access:1;
-       kmemcheck_bitfield_end(flags);
 
        int id;
        char name[C2PORT_NAME_LEN];
index 46930f82a9888aa9975a9bc2f732d9af50c25577..7bf3b99e6fbb1738e38b763692256045a4cd45ea 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,7 +9,6 @@
 #include <linux/dma-debug.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
 #include <linux/bug.h>
 #include <linux/mem_encrypt.h>
 
@@ -230,7 +229,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;
 
-       kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
@@ -263,11 +261,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   unsigned long attrs)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
-       int i, ents;
-       struct scatterlist *s;
+       int ents;
 
-       for_each_sg(sg, s, nents, i)
-               kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
@@ -297,7 +292,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;
 
-       kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
index 48ec57e70f9f3d9a073f9cdd439c8ed597f14445..42197b16dd78695b507809e6df4d4a8266deafbc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -454,13 +454,11 @@ struct bpf_binary_header {
 
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
-       kmemcheck_bitfield_begin(meta);
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                locked:1,       /* Program image locked? */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1;   /* Do we need dst entry? */
-       kmemcheck_bitfield_end(meta);
        enum bpf_prog_type      type;           /* Type of BPF program */
        u32                     len;            /* Number of filter blocks */
        u32                     jited_len;      /* Size of jited insns in bytes */
index c85f11dafd56064c5c77a4e39cd4ad7a74f65755..9f0bb908e2b5f8228bc2c89a97ac7445b5acbb3c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -207,14 +207,6 @@ struct page {
                                           not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
 
-#ifdef CONFIG_KMEMCHECK
-       /*
-        * kmemcheck wants to track the status of each byte in a page; this
-        * is a pointer to such a status block. NULL if not tracked.
-        */
-       void *shadow;
-#endif
-
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
 #endif
index d97d80d7fdf8a9c97714d1349b5534ef5509e902..caeb159abda508580dfe4fa15940f36e51bdd1e8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -22,7 +22,6 @@
 #include <linux/random.h>
 #include <linux/wait.h>
 #include <linux/fcntl.h>       /* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
 #include <linux/once.h>
 #include <linux/fs.h>
@@ -111,9 +110,7 @@ struct socket_wq {
 struct socket {
        socket_state            state;
 
-       kmemcheck_bitfield_begin(type);
        short                   type;
-       kmemcheck_bitfield_end(type);
 
        unsigned long           flags;
 
index fa6ace66fea5e383bbcec98e064267667ad74b76..289e4d54e3e05e37a620e44199681c8114259bed 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -2,7 +2,6 @@
 #ifndef _LINUX_RING_BUFFER_H
 #define _LINUX_RING_BUFFER_H
 
-#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
@@ -14,9 +13,7 @@ struct ring_buffer_iter;
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
-       kmemcheck_bitfield_begin(bitfield);
        u32             type_len:5, time_delta:27;
-       kmemcheck_bitfield_end(bitfield);
 
        u32             array[];
 };
index 051e0939ec190ab2d3701508917fa4119e8b17a8..be45224b01d788fd39be92c4c45f1ec9898488f8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,7 +15,6 @@
 #define _LINUX_SKBUFF_H
 
 #include <linux/kernel.h>
-#include <linux/kmemcheck.h>
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/bug.h>
@@ -706,7 +705,6 @@ struct sk_buff {
        /* Following fields are _not_ copied in __copy_skb_header()
         * Note that queue_mapping is here mostly to fill a hole.
         */
-       kmemcheck_bitfield_begin(flags1);
        __u16                   queue_mapping;
 
 /* if you move cloned around you also must adapt those constants */
@@ -725,7 +723,6 @@ struct sk_buff {
                                head_frag:1,
                                xmit_more:1,
                                __unused:1; /* one bit hole */
-       kmemcheck_bitfield_end(flags1);
 
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
index db8162dd8c0bcbcaffcb1a0f6da1be139a5008d4..8e51b4a69088c211f79b1d5e26029c56df93b99a 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -17,7 +17,6 @@
 #define _INET_SOCK_H
 
 #include <linux/bitops.h>
-#include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/jhash.h>
@@ -84,7 +83,6 @@ struct inet_request_sock {
 #define ireq_state             req.__req_common.skc_state
 #define ireq_family            req.__req_common.skc_family
 
-       kmemcheck_bitfield_begin(flags);
        u16                     snd_wscale : 4,
                                rcv_wscale : 4,
                                tstamp_ok  : 1,
@@ -93,7 +91,6 @@ struct inet_request_sock {
                                ecn_ok     : 1,
                                acked      : 1,
                                no_srccheck: 1;
-       kmemcheck_bitfield_end(flags);
        u32                     ir_mark;
        union {
                struct ip_options_rcu __rcu     *ireq_opt;
index 6a75d67a30fd80d15e40e86b59d6216da5e94989..1356fa6a7566bf8b53632215ef8de4b153848f9b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -15,8 +15,6 @@
 #ifndef _INET_TIMEWAIT_SOCK_
 #define _INET_TIMEWAIT_SOCK_
 
-
-#include <linux/kmemcheck.h>
 #include <linux/list.h>
 #include <linux/timer.h>
 #include <linux/types.h>
@@ -69,14 +67,12 @@ struct inet_timewait_sock {
        /* Socket demultiplex comparisons on incoming packets. */
        /* these three are in inet_sock */
        __be16                  tw_sport;
-       kmemcheck_bitfield_begin(flags);
        /* And these are ours. */
        unsigned int            tw_kill         : 1,
                                tw_transparent  : 1,
                                tw_flowlabel    : 20,
                                tw_pad          : 2,    /* 2 bits hole */
                                tw_tos          : 8;
-       kmemcheck_bitfield_end(flags);
        struct timer_list       tw_timer;
        struct inet_bind_bucket *tw_tb;
 };
index 006580155a87e3fe6039d70842c62bdcb8788a1d..9bd5d68076d9f84fa053bf41a3f343670f7efda9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -436,7 +436,6 @@ struct sock {
 #define SK_FL_TYPE_MASK    0xffff0000
 #endif
 
-       kmemcheck_bitfield_begin(flags);
        unsigned int            sk_padding : 1,
                                sk_kern_sock : 1,
                                sk_no_check_tx : 1,
@@ -445,8 +444,6 @@ struct sock {
                                sk_protocol  : 8,
                                sk_type      : 16;
 #define SK_PROTOCOL_MAX U8_MAX
-       kmemcheck_bitfield_end(flags);
-
        u16                     sk_gso_max_segs;
        unsigned long           sk_lingertime;
        struct proto            *sk_prot_creator;
index b32ec72cdf3dd8731b53b57975d96d2edd6cbf0a..2d355a61dfc51a5582f834f4fcca382469c83fb0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -69,7 +69,6 @@
 #include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
-#include <linux/kmemcheck.h>
 #include <linux/sfi.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
index 2246115365d99fbd614365e1593e616bc77f66f2..d203a5d6b726d6bb80cb7162b0a6e7c534ef67df 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -85,8 +85,6 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
        if (fp == NULL)
                return NULL;
 
-       kmemcheck_annotate_bitfield(fp, meta);
-
        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
        if (aux == NULL) {
                vfree(fp);
@@ -127,8 +125,6 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
        if (fp == NULL) {
                __bpf_prog_uncharge(fp_old->aux->user, delta);
        } else {
-               kmemcheck_annotate_bitfield(fp, meta);
-
                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = pages;
                fp->aux->prog = fp;
@@ -662,8 +658,6 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
 
        fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
        if (fp != NULL) {
-               kmemcheck_annotate_bitfield(fp, meta);
-
                /* aux->prog still points to the fp_other one, so
                 * when promoting the clone to the real program,
                 * this still needs to be adapted.
index e36e652d996fe682157c52768949f0b9c7f1e6d1..4d362d3e457193ecf5d7f6e74034b8ee1fa128f6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -47,7 +47,6 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
-#include <linux/kmemcheck.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
 
@@ -3225,8 +3224,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
 {
        int i;
 
-       kmemcheck_mark_initialized(lock, sizeof(*lock));
-
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                lock->class_cache[i] = NULL;
 
index 0476a9372014763fba412ef9f459d8384f73ae94..39c221454186361a9b7d4833d0e7929e6b6fe447 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -13,7 +13,6 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>     /* for self test */
-#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -2059,7 +2058,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        }
 
        event = __rb_page_index(tail_page, tail);
-       kmemcheck_annotate_bitfield(event, bitfield);
 
        /* account for padding bytes */
        local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
@@ -2690,7 +2688,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        /* We reserved something on the buffer */
 
        event = __rb_page_index(tail_page, tail);
-       kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(cpu_buffer, event, info);
 
        local_inc(&tail_page->entries);
index a1ba553816eb398cdef0bb8f3acd253466d9c9a0..bd1374f402cdac4899b7614353d9d82b26be32b9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
 #include <linux/atomic.h>
 
 #include <linux/kasan.h>
-#include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>
 
@@ -1238,9 +1237,6 @@ static bool update_checksum(struct kmemleak_object *object)
 {
        u32 old_csum = object->checksum;
 
-       if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
-               return false;
-
        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();
@@ -1314,11 +1310,6 @@ static void scan_block(void *_start, void *_end,
                if (scan_should_stop())
                        break;
 
-               /* don't scan uninitialized memory */
-               if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
-                                                 BYTES_PER_POINTER))
-                       continue;
-
                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();
index 2de080003693cebfb037dbb5a6e2e262823149a5..6627caeeaf8245cc8c4ba88fb05bbd31d84e06eb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
-#include <linux/kmemcheck.h>
 #include <linux/kasan.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
@@ -1022,7 +1021,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
        VM_BUG_ON_PAGE(PageTail(page), page);
 
        trace_mm_page_free(page, order);
-       kmemcheck_free_shadow(page, order);
 
        /*
         * Check tail pages before head page information is cleared to
@@ -2674,15 +2672,6 @@ void split_page(struct page *page, unsigned int order)
        VM_BUG_ON_PAGE(PageCompound(page), page);
        VM_BUG_ON_PAGE(!page_count(page), page);
 
-#ifdef CONFIG_KMEMCHECK
-       /*
-        * Split shadow pages too, because free(page[0]) would
-        * otherwise free the whole shadow.
-        */
-       if (kmemcheck_page_is_tracked(page))
-               split_page(virt_to_page(page[0].shadow), order);
-#endif
-
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
        split_page_owner(page, order);
@@ -4228,9 +4217,6 @@ out:
                page = NULL;
        }
 
-       if (kmemcheck_enabled && page)
-               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
        return page;
index b7095884fd93fa957fa25f40e2bc8c4c6cf1efa8..50713827b3ed057dc5aef1907b452fa94b51cbad 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
 #include       <linux/debugobjects.h>
-#include       <linux/kmemcheck.h>
 #include       <linux/memory.h>
 #include       <linux/prefetch.h>
 #include       <linux/sched/task_stack.h>
@@ -1435,15 +1434,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
        if (sk_memalloc_socks() && page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
-       if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
-               kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
-
-               if (cachep->ctor)
-                       kmemcheck_mark_uninitialized_pages(page, nr_pages);
-               else
-                       kmemcheck_mark_unallocated_pages(page, nr_pages);
-       }
-
        return page;
 }
 
@@ -1455,8 +1445,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
        int order = cachep->gfporder;
        unsigned long nr_freed = (1 << order);
 
-       kmemcheck_free_shadow(page, order);
-
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
        else
@@ -3516,8 +3504,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
 
-       kmemcheck_slab_free(cachep, objp, cachep->object_size);
-
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
         * This will avoid cache misses that happen while accessing slabp (which
index 86d7c7d860f92c3a46d505a4b327c76e34f5415a..438e4ace4d9dc877b8a0791b7cbb8173ab85942d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -40,7 +40,6 @@ struct kmem_cache {
 
 #include <linux/memcontrol.h>
 #include <linux/fault-inject.h>
-#include <linux/kmemcheck.h>
 #include <linux/kasan.h>
 #include <linux/kmemleak.h>
 #include <linux/random.h>
@@ -439,7 +438,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
        for (i = 0; i < size; i++) {
                void *object = p[i];
 
-               kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
                kmemleak_alloc_recursive(object, s->object_size, 1,
                                         s->flags, flags);
                kasan_slab_alloc(s, object, flags);
index 8e1c027a30f4d04b62ba4a38a19802d8f009dd06..2de2fc3adbd2385d58826dc7098b1684d7e5f223 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -22,7 +22,6 @@
 #include <linux/notifier.h>
 #include <linux/seq_file.h>
 #include <linux/kasan.h>
-#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1375,7 +1374,6 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
                unsigned long flags;
 
                local_irq_save(flags);
-               kmemcheck_slab_free(s, x, s->object_size);
                debug_check_no_locks_freed(x, s->object_size);
                local_irq_restore(flags);
        }
@@ -1596,22 +1594,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                stat(s, ORDER_FALLBACK);
        }
 
-       if (kmemcheck_enabled &&
-           !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
-               int pages = 1 << oo_order(oo);
-
-               kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-
-               /*
-                * Objects from caches that have a constructor don't get
-                * cleared when they're allocated, so we need to do it here.
-                */
-               if (s->ctor)
-                       kmemcheck_mark_uninitialized_pages(page, pages);
-               else
-                       kmemcheck_mark_unallocated_pages(page, pages);
-       }
-
        page->objects = oo_objects(oo);
 
        order = compound_order(page);
@@ -1687,8 +1669,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                        check_object(s, page, p, SLUB_RED_INACTIVE);
        }
 
-       kmemcheck_free_shadow(page, compound_order(page));
-
        mod_lruvec_page_state(page,
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
index 15fa5baa8faef7a609420009fa5be230d28c7ae7..cc811add68c642726711e1ab0e5fe8e1858083bd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -41,7 +41,6 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -234,14 +233,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
-       kmemcheck_annotate_variable(shinfo->destructor_arg);
 
        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff_fclones *fclones;
 
                fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
-               kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
                skb->fclone = SKB_FCLONE_ORIG;
                refcount_set(&fclones->fclone_ref, 1);
 
@@ -301,7 +298,6 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
-       kmemcheck_annotate_variable(shinfo->destructor_arg);
 
        return skb;
 }
@@ -1284,7 +1280,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                if (!n)
                        return NULL;
 
-               kmemcheck_annotate_bitfield(n, flags1);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }
 
index beb1e299fed385a537553febe7e6bc51d6733015..ec6eb546b2284287840f72412cb1b6c7b3fe2f6b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1469,8 +1469,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                sk = kmalloc(prot->obj_size, priority);
 
        if (sk != NULL) {
-               kmemcheck_annotate_bitfield(sk, flags);
-
                if (security_sk_alloc(sk, family, priority))
                        goto out_free;
 
index 5b039159e67a60c13bc2399ae140c90d31ae3dc5..d451b9f19b59da5598a37eb088ff1783f695a7e5 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kmemcheck.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <net/inet_hashtables.h>
@@ -167,8 +166,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
        if (tw) {
                const struct inet_sock *inet = inet_sk(sk);
 
-               kmemcheck_annotate_bitfield(tw, flags);
-
                tw->tw_dr           = dr;
                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
index ff48ac654e5ae110cd34e20c9407868e25f2a201..d9d215e27b8ab0f293627fae9875cb0a42df090c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6204,7 +6204,6 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
        if (req) {
                struct inet_request_sock *ireq = inet_rsk(req);
 
-               kmemcheck_annotate_bitfield(ireq, flags);
                ireq->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
                ireq->pktopts = NULL;
index d894c7c5fa54987ab52a2b5eb1354940afe315b9..43d2f17f5eeaceb4f1c5ea6b977db56f57200245 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -568,7 +568,6 @@ struct socket *sock_alloc(void)
 
        sock = SOCKET_I(inode);
 
-       kmemcheck_annotate_bitfield(sock, type);
        inode->i_ino = get_next_ino();
        inode->i_mode = S_IFSOCK | S_IRWXUGO;
        inode->i_uid = current_fsuid();