#include <linux/prefetch.h>
#include <linux/memcontrol.h>
+#ifdef CONFIG_RKP_KDP
+#include <linux/security.h>
+
+spinlock_t ro_pages_lock = __SPIN_LOCK_UNLOCKED(ro_pages_lock);
+
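+/*
+ * check_cred_cache(s, r): when @s is the RKP-protected "cred_jar_ro" cache,
+ * return @r from the calling function so the SLUB debug/consistency paths
+ * leave those objects alone (@r is left empty where the caller returns void).
+ */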
+#define check_cred_cache(s,r) \
+do { \
+	if ((s->name) && !strcmp(s->name, "cred_jar_ro")) \
+		return r; \
+} while (0)
+#else
+#define check_cred_cache(s,r)
+#endif /* CONFIG_RKP_KDP */
+
+#ifdef CONFIG_SLUB_DEBUG
+#define SLUB_BUG() BUG()
+#else
+#define SLUB_BUG()
+#endif
+
#include <trace/events/kmem.h>
#include "internal.h"
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
+#ifdef CONFIG_RKP_KDP
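+	/*
+	 * Objects in the cred_jar_ro cache live in RKP read-only pages (see
+	 * rkp_ro_alloc() in allocate_slab()), so the free pointer has to be
+	 * written through an RKP call instead of a direct store.
+	 */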
+	if (rkp_cred_enable && s->name && !strcmp(s->name, "cred_jar_ro")) {
+		rkp_call(RKP_CMDID(0x44), (unsigned long long) object,
+			 (unsigned long long) s->offset, (unsigned long long) fp, 0, 0);
+	} else
+#endif /*CONFIG_RKP_KDP*/
*(void **)(object + s->offset) = fp;
}
__bit_spin_unlock(PG_locked, &page->flags);
}
+static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
+{
+ struct page tmp;
+ tmp.counters = counters_new;
+ /*
+ * page->counters can cover frozen/inuse/objects as well
+ * as page->_count. If we assign to ->counters directly
+ * we run the risk of losing updates to page->_count, so
+ * be careful and only assign to the fields we need.
+ */
+ page->frozen = tmp.frozen;
+ page->inuse = tmp.inuse;
+ page->objects = tmp.objects;
+}
+
/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
slab_lock(page);
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
return 1;
}
slab_lock(page);
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
- page->counters = counters_new;
+ set_page_slub_counters(page, counters_new);
slab_unlock(page);
local_irq_restore(flags);
return 1;
void *p;
void *addr = page_address(page);
+#ifdef CONFIG_RKP_KDP
+ check_cred_cache(s, );
+#endif /* CONFIG_RKP_KDP */
for (p = page->freelist; p; p = get_freepointer(s, p))
set_bit(slab_index(p, s, addr), map);
}
{
struct track *p = get_track(s, object, alloc);
+#ifdef CONFIG_RKP_KDP
+ check_cred_cache(s, );
+#endif /* CONFIG_RKP_KDP */
if (addr) {
#ifdef CONFIG_STACKTRACE
struct stack_trace trace;
{
u8 *p = object;
+#ifdef CONFIG_RKP_KDP
+ check_cred_cache(s, );
+#endif /* CONFIG_RKP_KDP */
if (s->flags & __OBJECT_POISON) {
memset(p, POISON_FREE, s->object_size - 1);
p[s->object_size - 1] = POISON_END;
u8 *fault;
u8 *end;
+#ifdef CONFIG_RKP_KDP
+	check_cred_cache(s, 1);
+#endif /* CONFIG_RKP_KDP */
fault = memchr_inv(start, value, bytes);
if (!fault)
return 1;
if (!(s->flags & SLAB_POISON))
return 1;
+#ifdef CONFIG_RKP_KDP
+	check_cred_cache(s, 1);
+#endif /* CONFIG_RKP_KDP */
start = page_address(page);
length = (PAGE_SIZE << compound_order(page)) - s->reserved;
end = start + length;
slab_err(s, page, "Not a valid slab page");
return 0;
}
-
+#ifdef CONFIG_RKP_KDP
+	/*
+	 * The remaining consistency checks are skipped for the RKP-protected
+	 * cred_jar_ro cache.
+	 */
+ if (s->name && !strcmp(s->name, "cred_jar_ro"))
+ return 1;
+#endif /*CONFIG_RKP_KDP*/
maxobj = order_objects(compound_order(page), s->size, s->reserved);
if (page->objects > maxobj) {
slab_err(s, page, "objects %u > max %u",
unsigned long max_objects;
fp = page->freelist;
+#ifdef CONFIG_RKP_KDP
+	check_cred_cache(s, 0);
+#endif /* CONFIG_RKP_KDP */
while (fp && nr <= page->objects) {
if (fp == search)
return 1;
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
+#ifdef CONFIG_RKP_KDP
+ check_cred_cache(s, );
+#endif /* CONFIG_RKP_KDP */
if (!(s->flags & SLAB_STORE_USER))
return;
*/
static void remove_full(struct kmem_cache *s, struct page *page)
{
+#ifdef CONFIG_RKP_KDP
+ check_cred_cache(s, );
+#endif /* CONFIG_RKP_KDP */
if (!(s->flags & SLAB_STORE_USER))
return;
static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
void *object, unsigned long addr)
{
+#ifdef CONFIG_RKP_KDP
+	check_cred_cache(s, 0);
+#endif /* CONFIG_RKP_KDP */
if (!check_slab(s, page))
goto bad;
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+#ifdef CONFIG_RKP_KDP
+	check_cred_cache(s, NULL);
+#endif /* CONFIG_RKP_KDP */
spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);
/*
* Enable debugging if selected on the kernel commandline.
*/
- if (slub_debug && (!slub_debug_slabs ||
- !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+#ifdef CONFIG_RKP_KDP
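+	/* With RKP KDP, command-line slub_debug options are not applied to any cache. */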
+ return flags;
+#else
+ if (slub_debug && (!slub_debug_slabs || (name &&
+ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
flags |= slub_debug;
return flags;
+#endif
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
+#ifdef CONFIG_RKP_KDP
+ void *virt_page = NULL;
+#endif /*CONFIG_RKP_KDP*/
gfp_t alloc_gfp;
flags &= gfp_allowed_mask;
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+#ifdef CONFIG_RKP_KDP
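+	/*
+	 * cred_jar_ro slabs are taken from the RKP read-only page pool when
+	 * possible; if rkp_ro_alloc() fails, fall through to the regular
+	 * allocation path below.
+	 */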
+	if (s->name && !strcmp(s->name, "cred_jar_ro")) {
+		virt_page = rkp_ro_alloc();
+		if (!virt_page)
+			goto def_alloc;
+
+		page = virt_to_page(virt_page);
+		oo = s->min;
+	} else {
+def_alloc:
+#endif /*CONFIG_RKP_KDP*/
page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
stat(s, ORDER_FALLBACK);
}
+#ifdef CONFIG_RKP_KDP
+ }
+#endif /*CONFIG_RKP_KDP*/
if (kmemcheck_enabled && page
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1 << oo_order(oo));
+#ifdef CONFIG_RKP_KDP
+ /*
+ * We modify the following so that slab alloc for protected data
+ * types are allocated from our own pool.
+ */
+	if (s->name && !strcmp(s->name, "cred_jar_ro")) {
+		unsigned long long sc = 0;
+		unsigned long long va_page = (unsigned long long)__va(page_to_phys(page));
+
+		for (; sc < (1 << oo_order(oo)); sc++) {
+			rkp_call(RKP_CMDID(0x50), va_page, 0, 0, 0, 0);
+			va_page += PAGE_SIZE;
+		}
+	}
+#endif
return page;
}
if (unlikely(s->ctor))
s->ctor(object);
}
-
+#ifdef CONFIG_RKP_DMAP_PROT
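+/*
+ * Assumption: rkp_started is set once RKP initialization has completed;
+ * the rkp_map_bitmap updates below are skipped until then.
+ */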
+extern u8 rkp_started;
+#endif
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
}
setup_object(s, page, last);
set_freepointer(s, last, NULL);
-
+#ifdef CONFIG_RKP_DMAP_PROT
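+	/*
+	 * Record the new slab's pages in rkp_map_bitmap; the flag that is 1
+	 * here is 0 on the matching call in __free_slab().
+	 */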
+	if (rkp_cred_enable && rkp_started) {
+		rkp_call(RKP_CMDID(0x4a), page_to_phys(page), compound_order(page),
+			 1, (unsigned long)__pa(rkp_map_bitmap), 0);
+	}
+#endif
page->freelist = start;
page->inuse = page->objects;
page->frozen = 1;
out:
return page;
}
+#ifdef CONFIG_RKP_KDP
+extern unsigned int is_rkp_ro_page(u64 addr);
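+/*
+ * Return a cred_jar_ro slab's pages to the system. Pages that came from the
+ * RKP read-only pool (rkp_ro_alloc()) are handed back to that pool; pages
+ * that came from the normal allocator are reported to RKP one page at a
+ * time before being freed.
+ */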
+void free_ro_pages(struct page *page, int order)
+{
+	unsigned long flags;
+	unsigned long long sc, va_page;
+
+	sc = 0;
+	va_page = (unsigned long long)__va(page_to_phys(page));
+	if (is_rkp_ro_page(va_page)) {
+		rkp_call(RKP_CMDID(0x48), va_page, 0, 0, 0, 0);
+		rkp_ro_free((void *)va_page);
+		return;
+	}
+
+	spin_lock_irqsave(&ro_pages_lock, flags);
+	for (; sc < (1 << order); sc++) {
+		rkp_call(RKP_CMDID(0x48), va_page, 0, 0, 0, 0);
+		va_page += PAGE_SIZE;
+	}
+	__free_memcg_kmem_pages(page, order);
+	spin_unlock_irqrestore(&ro_pages_lock, flags);
+}
+#endif /*CONFIG_RKP_KDP*/
static void __free_slab(struct kmem_cache *s, struct page *page)
{
int order = compound_order(page);
int pages = 1 << order;
+#ifdef CONFIG_RKP_DMAP_PROT
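+	/*
+	 * Mirror of the rkp_map_bitmap update in new_slab(): the same RKP
+	 * command is issued with the flag cleared before the slab's pages
+	 * are released back to the page allocator.
+	 */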
+	if (rkp_cred_enable && rkp_started) {
+		rkp_call(RKP_CMDID(0x4a), page_to_phys(page), compound_order(page),
+			 0, (unsigned long)__pa(rkp_map_bitmap), 0);
+	}
+#endif
+
if (kmem_cache_debug(s)) {
void *p;
page_mapcount_reset(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
+
+#ifdef CONFIG_RKP_KDP
+ /* We free the protected pages here. */
+ if (s->name && !strcmp(s->name, "cred_jar_ro"))
+ free_ro_pages(page, order);
+ else
+#endif
__free_memcg_kmem_pages(page, order);
}
p->slab_cache = s;
#ifdef CONFIG_SLUB_DEBUG
+#ifndef CONFIG_RKP_KDP
list_for_each_entry(p, &n->full, lru)
p->slab_cache = s;
+#endif /*CONFIG_RKP_KDP*/
#endif
}
}
page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}