/* Symbols exported for the Mac-on-Linux (MOL) module. */
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */

#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
/*
 * NOTE(review): this #error sits under CONFIG_SMP, which would break every
 * SMP build. It looks like a page-size guard (e.g. PAGE_SHIFT != 12) whose
 * original #if condition was lost — confirm against upstream before merging.
 */
#error Only 32 bit pages are supported for now
#endif
+static ulong htab;
+static u32 htabmask;
+
/*
 * Invalidate a guest shadow PTE cached in the host HTAB.
 *
 * NOTE(review): this body is clearly truncated — 'pteg' is declared but
 * never used, and 'return NULL;' in a void function is a constraint
 * violation that will not compile. The actual invalidation logic appears
 * to have been lost in extraction; restore it from upstream rather than
 * building from this fragment.
 */
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;
	return NULL;
}
-extern struct hash_pte *Hash;
-extern unsigned long _SDR1;
-
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
bool primary)
{
- u32 page, hash, htabmask;
- ulong pteg = (ulong)Hash;
+ u32 page, hash;
+ ulong pteg = htab;
page = (eaddr & ~ESID_MASK) >> 12;
if (!primary)
hash = ~hash;
- htabmask = ((_SDR1 & 0x1FF) << 16) | 0xFFC0;
hash &= htabmask;
pteg |= hash;
- dprintk_mmu("htab: %p | hash: %x | htabmask: %x | pteg: %lx\n",
- Hash, hash, htabmask, pteg);
+ dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
+ htab, hash, htabmask, pteg);
return (u32*)pteg;
}
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;

	/*
	 * NOTE(review): the signature line of this function and parts of
	 * the error path appear to be missing from this fragment; only the
	 * stray patch markers were removed here.
	 */
	err = __init_new_context();
	if (err < 0)
		vcpu3s->vsid_next = vcpu3s->vsid_first;

	/* Remember where the HTAB is: read SDR1 directly instead of the
	 * removed Hash/_SDR1 exports. Low 9 bits are HTABMASK, the rest of
	 * the upper half is the HTAB real-address base. */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	return 0;
}