#define GUEST_PL 1
-/* Every guest maps the core switcher code. */
-#define SHARED_SWITCHER_PAGES \
- DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
-/* Pages for switcher itself, then two pages per cpu */
-#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
#ifdef CONFIG_X86_PAE
* easy.
*/
+ /* We assume Switcher text fits into a single page. */
+ if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+ printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+ end_switcher_text - start_switcher_text);
+ return -EINVAL;
+ }
+
/*
* We allocate an array of struct page pointers. map_vm_area() wants
* this, rather than just an array of pages.
goto out;
/* Now we set up the pagetable implementation for the Guests. */
- err = init_pagetables(switcher_pages, SHARED_SWITCHER_PAGES);
+ err = init_pagetables(switcher_pages);
if (err)
goto unmap;
/*H:520
* Setting up the Switcher PTE page for given CPU is fairly easy, given
- * the CPU number and the "struct page"s for the Switcher code itself.
- *
- * Currently the Switcher is less than a page long, so "pages" is always 1.
+ * the CPU number and the "struct page"s for the Switcher and per-cpu pages.
*/
static __init void populate_switcher_pte_page(unsigned int cpu,
- struct page *switcher_pages[],
- unsigned int pages)
+ struct page *switcher_pages[])
{
- unsigned int i;
pte_t *pte = switcher_pte_page(cpu);
+ int i;
- /* The first entries are easy: they map the Switcher code. */
- for (i = 0; i < pages; i++) {
- set_pte(&pte[i], mk_pte(switcher_pages[i],
+ /* The first entry maps the Switcher code. */
+ set_pte(&pte[0], mk_pte(switcher_pages[0],
__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
- }
/* The only other thing we map is this CPU's pair of pages. */
- i = pages + cpu*2;
+ i = 1 + cpu*2;
/* First page (Guest registers) is writable from the Guest */
set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_pages[i]),
* At boot or module load time, init_pagetables() allocates and populates
* the Switcher PTE page for each CPU.
*/
-__init int init_pagetables(struct page **switcher_pages, unsigned int pages)
+__init int init_pagetables(struct page **switcher_pages)
{
unsigned int i;
free_switcher_pte_pages();
return -ENOMEM;
}
- populate_switcher_pte_page(i, switcher_pages, pages);
+ populate_switcher_pte_page(i, switcher_pages);
}
return 0;
}
return switcher_addr - (unsigned long)start_switcher_text;
}
-/* This cpu's struct lguest_pages. */
+/* This cpu's struct lguest_pages (after the Switcher text page) */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
- return &(((struct lguest_pages *)
- (switcher_addr + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+ return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
}
static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);