#define MMU_UPDATE_HISTO 30
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
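Note that, unlike the static lock it replaces below, DEFINE_SPINLOCK() at file scope gives xen_reservation_lock global linkage, pairing with the extern declaration added to the shared memory interface header at the end of this patch. A minimal sketch of the resulting definition/declaration pair (placement as implied by this patch's hunks, not spelled out in the excerpt):

	/* definition, in the Xen MMU code: */
	DEFINE_SPINLOCK(xen_reservation_lock);	/* statically initialized, starts unlocked */

	/* declaration, in the shared memory interface header: */
	extern spinlock_t xen_reservation_lock;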
#ifdef CONFIG_XEN_DEBUG_FS
static struct {
static int register_balloon(struct sys_device *sysdev);
-/*
- * Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and driver_pages, and
- * balloon lists.
- */
-static DEFINE_SPINLOCK(balloon_lock);
-
static struct balloon_stats balloon_stats;
/* We increase/decrease in batches which fit in a page */
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- spin_lock_irqsave(&balloon_lock, flags);
+ spin_lock_irqsave(&xen_reservation_lock, flags);
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
balloon_stats.current_pages += rc;
out:
- spin_unlock_irqrestore(&balloon_lock, flags);
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
return rc < 0 ? rc : rc != nr_pages;
}
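Two details of the hunk above are worth spelling out as a hedged sketch: frame_list holds one page of frame numbers per batch (with 4 KiB pages and 8-byte longs that is 512 frames, arithmetic assumed here rather than stated in the excerpt), and the final expression encodes a three-way return convention:

	/*
	 * Illustrative comment, not part of this patch:
	 *   rc < 0              -> hypercall error, propagated as-is
	 *   rc == nr_pages      -> 0: the whole batch succeeded
	 *   0 <= rc < nr_pages  -> 1: partial batch, caller retries the rest
	 */
	return rc < 0 ? rc : rc != nr_pages;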
kmap_flush_unused();
flush_tlb_all();
- spin_lock_irqsave(&balloon_lock, flags);
+ spin_lock_irqsave(&xen_reservation_lock, flags);
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
balloon_stats.current_pages -= nr_pages;
- spin_unlock_irqrestore(&balloon_lock, flags);
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
return need_sleep;
}
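The ordering in this hunk is the point of interest: all kernel mappings of the pages are flushed before the lock is taken, so by the time the P2M entries are invalidated under xen_reservation_lock no CPU can still reach the frames being returned. A commented restatement of the same lines (comments added here for illustration only):

	kmap_flush_unused();	/* drop lazily retained kmap aliases of the pages */
	flush_tlb_all();	/* no CPU may keep a stale translation */

	spin_lock_irqsave(&xen_reservation_lock, flags);
	/* safe now: invalidate the P2M entries and hand the frames back */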
#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__
+#include <linux/spinlock.h>
+
/*
* Increase or decrease the specified domain's memory reservation. Returns a
* -ve errcode on failure, or the # extents successfully allocated or freed.
};
DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
+
+/*
+ * Prevent the balloon driver from changing the memory reservation
+ * during a driver critical region.
+ */
+extern spinlock_t xen_reservation_lock;
#endif /* __XEN_PUBLIC_MEMORY_H__ */
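With the extern in a shared header, any Xen code outside the balloon driver can pin the reservation across a critical region. A self-contained sketch of a hypothetical user (example_critical_region and its body are invented for illustration, not part of this patch):

	#include <xen/interface/memory.h>	/* now also pulls in linux/spinlock.h */

	static void example_critical_region(void)	/* hypothetical name */
	{
		unsigned long flags;

		spin_lock_irqsave(&xen_reservation_lock, flags);
		/* frames handled here cannot be ballooned out from under us */
		spin_unlock_irqrestore(&xen_reservation_lock, flags);
	}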