diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/kref.h>
#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
 */
#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/dma-mapping.h>
extern const struct dma_map_ops or1k_dma_map_ops;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
endif
export CONFIG_X86_X32_ABI
-# Don't unroll struct assignments with kmemcheck enabled
-ifeq ($(CONFIG_KMEMCHECK),y)
- KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
-endif
-
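For context on the block removed above: struct assignments can be lowered by GCC into its builtin memcpy expansion, and the flag forced an out-of-line call to the kernel's memcpy() instead, one well-known helper that kmemcheck's instruction decoder could cope with. A minimal sketch of the affected pattern, using a hypothetical struct that is not from the tree:

	/*
	 * Hypothetical example: with builtin memcpy enabled, GCC may
	 * unroll this assignment into inline moves; with
	 * -fno-builtin-memcpy it emits a call to the kernel's memcpy()
	 * instead, which kmemcheck could intercept.
	 */
	struct sample { long a, b, c, d; };

	static void copy_sample(struct sample *dst, const struct sample *src)
	{
		*dst = *src;
	}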
#
# If the function graph tracer is used with mcount instead of fentry,
# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
 * Documentation/DMA-API.txt for documentation.
*/
-#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
-#ifdef CONFIG_KMEMCHECK
-/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
-# include <asm-generic/xor.h>
-#elif !defined(_ASM_X86_XOR_H)
+#ifndef _ASM_X86_XOR_H
#define _ASM_X86_XOR_H
/*
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
#include <linux/edac.h>
#endif
-#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
if (!dr6 && user_mode(regs))
user_icebp = 1;
- /* Catch kmemcheck conditions! */
- if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
- goto exit;
-
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
-#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
#include <asm/vm86.h> /* struct vm86 */
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
- if (kmemcheck_active(regs))
- kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
-
- if (kmemcheck_fault(regs, address, error_code))
- return;
}
/* Can handle a stale RO->RW TLB: */
diff --git a/drivers/char/random.c b/drivers/char/random.c
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
-#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/syscalls.h>
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
- kmemcheck_annotate_bitfield(c2dev, flags);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
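The annotation deleted above existed because a bitfield store compiles to a read-modify-write of the whole storage unit. A sketch with a hypothetical struct, not the c2port one:

	/*
	 * The first store to a bitfield in freshly kmalloc'ed memory
	 * loads the containing word, sets the bit, and stores the word
	 * back, so it reads bytes that were never written.
	 * kmemcheck_annotate_bitfield() marked the unit initialized to
	 * suppress that false positive.
	 */
	struct demo {
		unsigned int access:1;
		unsigned int flash_access:1;
	};

	static void demo_set_access(struct demo *d)	/* d fresh from kmalloc() */
	{
		d->access = 1;	/* load word, set bit, store word */
	}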
diff --git a/fs/dcache.c b/fs/dcache.c
 */
unsigned int i;
BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
- kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
swap(((long *) &dentry->d_iname)[i],
((long *) &target->d_iname)[i]);
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
 * the Free Software Foundation
*/
-#include <linux/kmemcheck.h>
-
#define C2PORT_NAME_LEN 32
struct device;
/* Main struct */
struct c2port_ops;
struct c2port_device {
- kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
- kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
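For reference, the begin/end markers removed here were zero-size array members that delimited the annotated region; this is a reconstruction of the old <linux/kmemcheck.h> definitions from memory, so details may differ from the tree:

	#define kmemcheck_bitfield_begin(name)	int name##_begin[0];
	#define kmemcheck_bitfield_end(name)	int name##_end[0];

	/* The helper measured the span between the two markers and
	 * marked it initialized: */
	#define kmemcheck_annotate_bitfield(ptr, name)			\
		do {							\
			if (!(ptr))					\
				break;					\
			kmemcheck_mark_initialized(&(ptr)->name##_begin, \
				(long)&(ptr)->name##_end -		\
				(long)&(ptr)->name##_begin);		\
		} while (0)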
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
+ int ents;
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
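The kmemcheck_mark_initialized() calls dropped from these helpers declared DMA buffers initialized, the idea being that the device rather than the CPU produces or consumes their contents. The mapping API itself is unchanged; a minimal usage sketch with hypothetical dev/buf/len:

	#include <linux/dma-mapping.h>

	static int demo_send(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* After this patch, the mapping fast path no longer
		 * touches kmemcheck shadow state for 'buf'. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;
		/* ... start and wait for the transfer ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}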
diff --git a/include/linux/filter.h b/include/linux/filter.h
struct bpf_prog {
u16 pages; /* Number of allocated pages */
- kmemcheck_bitfield_begin(meta);
u16 jited:1, /* Is our filter JIT'ed? */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1; /* Do we need dst entry? */
- kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
						   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_KMEMCHECK
- /*
- * kmemcheck wants to track the status of each byte in a page; this
- * is a pointer to such a status block. NULL if not tracked.
- */
- void *shadow;
-#endif
-
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
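The deleted comment above describes the per-page shadow pointer: kmemcheck kept roughly one status byte per data byte. The states below are reconstructed from the x86 kmemcheck implementation (names from memory, so they may differ in detail):

	enum kmemcheck_shadow {
		KMEMCHECK_SHADOW_UNALLOCATED,	/* never handed out */
		KMEMCHECK_SHADOW_UNINITIALIZED,	/* allocated, not yet written */
		KMEMCHECK_SHADOW_INITIALIZED,	/* written; reads are fine */
		KMEMCHECK_SHADOW_FREED,		/* touching it is use-after-free */
	};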
diff --git a/include/linux/net.h b/include/linux/net.h
#include <linux/random.h>
#include <linux/wait.h>
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
struct socket {
socket_state state;
- kmemcheck_bitfield_begin(type);
short type;
- kmemcheck_bitfield_end(type);
unsigned long flags;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H
-#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
* Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
- kmemcheck_bitfield_begin(bitfield);
u32 type_len:5, time_delta:27;
- kmemcheck_bitfield_end(bitfield);
u32 array[];
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
/* Following fields are _not_ copied in __copy_skb_header()
* Note that queue_mapping is here mostly to fill a hole.
*/
- kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
/* if you move cloned around you also must adapt those constants */
head_frag:1,
xmit_more:1,
__unused:1; /* one bit hole */
- kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
#define _INET_SOCK_H
#include <linux/bitops.h>
-#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
#define ireq_state req.__req_common.skc_state
#define ireq_family req.__req_common.skc_family
- kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1;
- kmemcheck_bitfield_end(flags);
u32 ir_mark;
union {
struct ip_options_rcu __rcu *ireq_opt;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
#ifndef _INET_TIMEWAIT_SOCK_
#define _INET_TIMEWAIT_SOCK_
-
-#include <linux/kmemcheck.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/types.h>
/* Socket demultiplex comparisons on incoming packets. */
/* these three are in inet_sock */
__be16 tw_sport;
- kmemcheck_bitfield_begin(flags);
/* And these are ours. */
unsigned int tw_kill : 1,
tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
- kmemcheck_bitfield_end(flags);
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
};
diff --git a/include/net/sock.h b/include/net/sock.h
#define SK_FL_TYPE_MASK    0xffff0000
#endif
- kmemcheck_bitfield_begin(flags);
unsigned int sk_padding : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_protocol : 8,
sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
- kmemcheck_bitfield_end(flags);
-
u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
diff --git a/init/main.c b/init/main.c
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
-#include <linux/kmemcheck.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
	if (fp == NULL)
return NULL;
- kmemcheck_annotate_bitfield(fp, meta);
-
aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
if (aux == NULL) {
vfree(fp);
if (fp == NULL) {
__bpf_prog_uncharge(fp_old->aux->user, delta);
} else {
- kmemcheck_annotate_bitfield(fp, meta);
-
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = pages;
fp->aux->prog = fp;
fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
if (fp != NULL) {
- kmemcheck_annotate_bitfield(fp, meta);
-
/* aux->prog still points to the fp_other one, so
* when promoting the clone to the real program,
* this still needs to be adapted.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
-#include <linux/kmemcheck.h>
#include <linux/random.h>
#include <linux/jhash.h>
{
int i;
- kmemcheck_mark_initialized(lock, sizeof(*lock));
-
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
lock->class_cache[i] = NULL;
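The mark removed above declared the whole lockdep_map initialized, since locks commonly sit inside kmalloc'ed objects and lockdep reads their fields on first use. A sketch with a hypothetical container struct:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct widget {
		spinlock_t lock;	/* embedded in uninitialized memory */
		int state;
	};

	static struct widget *widget_create(void)
	{
		struct widget *w = kmalloc(sizeof(*w), GFP_KERNEL);

		if (w)
			spin_lock_init(&w->lock); /* ends up in lockdep_init_map() */
		return w;
	}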
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h> /* for self test */
-#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
}
event = __rb_page_index(tail_page, tail);
- kmemcheck_annotate_bitfield(event, bitfield);
/* account for padding bytes */
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
/* We reserved something on the buffer */
event = __rb_page_index(tail_page, tail);
- kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, info);
local_inc(&tail_page->entries);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
#include <linux/atomic.h>
#include <linux/kasan.h>
-#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
{
u32 old_csum = object->checksum;
- if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
- return false;
-
kasan_disable_current();
object->checksum = crc32(0, (void *)object->pointer, object->size);
kasan_enable_current();
if (scan_should_stop())
break;
- /* don't scan uninitialized memory */
- if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
- BYTES_PER_POINTER))
- continue;
-
kasan_disable_current();
pointer = *ptr;
kasan_enable_current();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
- kmemcheck_free_shadow(page, order);
/*
* Check tail pages before head page information is cleared to
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
-#ifdef CONFIG_KMEMCHECK
- /*
- * Split shadow pages too, because free(page[0]) would
- * otherwise free the whole shadow.
- */
- if (kmemcheck_page_is_tracked(page))
- split_page(virt_to_page(page[0].shadow), order);
-#endif
-
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order);
page = NULL;
}
- if (kmemcheck_enabled && page)
- kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
return page;
diff --git a/mm/slab.c b/mm/slab.c
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
-#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
- if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
- kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
-
- if (cachep->ctor)
- kmemcheck_mark_uninitialized_pages(page, nr_pages);
- else
- kmemcheck_mark_unallocated_pages(page, nr_pages);
- }
-
return page;
}
int order = cachep->gfporder;
unsigned long nr_freed = (1 << order);
- kmemcheck_free_shadow(page, order);
-
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
else
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- kmemcheck_slab_free(cachep, objp, cachep->object_size);
-
/*
* Skip calling cache_free_alien() when the platform is not numa.
* This will avoid cache misses that happen while accessing slabp (which
diff --git a/mm/slab.h b/mm/slab.h
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
-#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
for (i = 0; i < size; i++) {
void *object = p[i];
- kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1,
s->flags, flags);
kasan_slab_alloc(s, object, flags);
diff --git a/mm/slub.c b/mm/slub.c
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
-#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
unsigned long flags;
local_irq_save(flags);
- kmemcheck_slab_free(s, x, s->object_size);
debug_check_no_locks_freed(x, s->object_size);
local_irq_restore(flags);
}
stat(s, ORDER_FALLBACK);
}
- if (kmemcheck_enabled &&
- !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
- int pages = 1 << oo_order(oo);
-
- kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-
- /*
- * Objects from caches that have a constructor don't get
- * cleared when they're allocated, so we need to do it here.
- */
- if (s->ctor)
- kmemcheck_mark_uninitialized_pages(page, pages);
- else
- kmemcheck_mark_unallocated_pages(page, pages);
- }
-
page->objects = oo_objects(oo);
order = compound_order(page);
check_object(s, page, p, SLUB_RED_INACTIVE);
}
- kmemcheck_free_shadow(page, compound_order(page));
-
mod_lruvec_page_state(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
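The removed slub block treated constructor-backed caches specially: as the deleted comment says, objects from caches with a ctor are not cleared per allocation, because the ctor runs once when the slab page is set up, so kmemcheck marked such pages uninitialized rather than unallocated. A sketch of a ctor-backed cache, with hypothetical names:

	#include <linux/slab.h>

	struct demo_obj {
		int refcount;
	};

	static void demo_ctor(void *obj)
	{
		struct demo_obj *d = obj;

		d->refcount = 0;	/* runs once per object, at page setup */
	}

	static struct kmem_cache *demo_cache;

	static int demo_cache_init(void)
	{
		demo_cache = kmem_cache_create("demo_obj",
					       sizeof(struct demo_obj), 0,
					       SLAB_HWCACHE_ALIGN, demo_ctor);
		return demo_cache ? 0 : -ENOMEM;
	}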
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- kmemcheck_annotate_variable(shinfo->destructor_arg);
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
- kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
refcount_set(&fclones->fclone_ref, 1);
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- kmemcheck_annotate_variable(shinfo->destructor_arg);
return skb;
}
if (!n)
return NULL;
- kmemcheck_annotate_bitfield(n, flags1);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
diff --git a/net/core/sock.c b/net/core/sock.c
		sk = kmalloc(prot->obj_size, priority);
if (sk != NULL) {
- kmemcheck_annotate_bitfield(sk, flags);
-
if (security_sk_alloc(sk, family, priority))
goto out_free;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
 */
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
if (tw) {
const struct inet_sock *inet = inet_sk(sk);
- kmemcheck_annotate_bitfield(tw, flags);
-
tw->tw_dr = dr;
/* Give us an identity. */
tw->tw_daddr = inet->inet_daddr;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
	if (req) {
struct inet_request_sock *ireq = inet_rsk(req);
- kmemcheck_annotate_bitfield(ireq, flags);
ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
ireq->pktopts = NULL;
diff --git a/net/socket.c b/net/socket.c
	sock = SOCKET_I(inode);
- kmemcheck_annotate_bitfield(sock, type);
inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();