void (*hugepage_invalidate)(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize);
+ int psize, int ssize, int local);
/* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*hpte_clear_all)(void);
int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
- pmd_t *pmdp, unsigned int psize, int ssize);
+ pmd_t *pmdp, unsigned int psize, int ssize,
+ int local);
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
static void native_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize)
+ int psize, int ssize, int local)
{
int i;
struct hash_pte *hptep;
* instruction compares entry_VA in tlb with the VA specified
* here
*/
- tlbie(vpn, psize, actual_psize, ssize, 0);
+ tlbie(vpn, psize, actual_psize, ssize, local);
}
local_irq_restore(flags);
}
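
For reference, the local flag threaded down in the hunk above ends up selecting between a CPU-local (tlbiel) and a broadcast (tlbie) invalidation inside the existing tlbie() helper in hash_native_64.c. A simplified sketch of that selection, with the native_tlbie_lock handling and the per-psize tlbiel capability check omitted:

/*
 * Sketch only: roughly how tlbie() acts on the local flag passed in above.
 * The real helper also takes native_tlbie_lock when needed and checks
 * mmu_psize_defs[psize].tlbiel; those details are left out here.
 */
static inline void tlbie_sketch(unsigned long vpn, int psize, int apsize,
				int ssize, int local)
{
	asm volatile("ptesync" : : : "memory");
	if (local && mmu_has_feature(MMU_FTR_TLBIEL)) {
		/* tlbiel: invalidate this CPU's TLB only, no broadcast */
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync" : : : "memory");
	} else {
		/* tlbie: broadcast the invalidation to all CPUs */
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	}
}
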
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
- pmd_t *pmdp, unsigned int psize, int ssize)
+ pmd_t *pmdp, unsigned int psize, int ssize, int local)
{
int i, max_hpte_count, valid;
unsigned long s_addr;
if (!hpte_slot_array)
return;
- if (ppc_md.hugepage_invalidate)
- return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
- psize, ssize);
+ if (ppc_md.hugepage_invalidate) {
+ ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+ psize, ssize, local);
+ goto tm_abort;
+ }
/*
* No bulk hpte removal support, invalidate each entry
*/
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, 0);
+ MMU_PAGE_16M, ssize, local);
+ }
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Transactions are not aborted by tlbiel, only tlbie.
+ * Without an abort, syncing a page back to a block device w/ PIO could
+ * pick up transactional data (bad!), so we force an abort here. Before
+ * the sync, the page will be made read-only, which will flush_hash_page.
+ * BIG ISSUE here: if the kernel uses a page from userspace without
+ * unmapping it first, it may see the speculated version.
+ */
+ if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
+ MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ tm_enable();
+ tm_abort(TM_CAUSE_TLBI);
}
+#endif
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
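
The hunk above skips from the bulk-removal fallback straight to the slot computation. As a hedged sketch of the elided walk, each valid byte of hpte_slot_array is turned into an HPTE slot roughly as follows (helper names such as hpte_hash_index(), hpt_vpn() and hpt_hash() follow the hash-MMU headers; treat this as an approximation, not the exact code):

/*
 * Sketch only: per-entry slot derivation for the i-th subpage of the
 * hugepage, matching the slot/_PTEIDX_GROUP_IX lines shown in the hunk.
 */
static unsigned long slot_for_subpage(unsigned char *hpte_slot_array, int i,
				      unsigned long s_addr, unsigned long vsid,
				      unsigned int shift, int ssize)
{
	unsigned long addr, vpn, hash, hidx, slot;

	/* each byte encodes a valid bit, a secondary-group bit and a 3-bit group index */
	hidx = hpte_hash_index(hpte_slot_array, i);
	addr = s_addr + (i * (1ul << shift));	/* VA of the i-th subpage */
	vpn  = hpt_vpn(addr, vsid, ssize);
	hash = hpt_hash(vpn, shift, ssize);
	if (hidx & _PTEIDX_SECONDARY)		/* entry lives in the secondary group */
		hash = ~hash;

	slot  = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	return slot;
}
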
*/
if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
- ssize);
+ ssize, local);
}
valid = hpte_valid(hpte_slot_array, index);
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long old_pmd)
{
- int ssize;
+ int ssize, local = 0;
unsigned int psize;
unsigned long vsid;
+ const struct cpumask *tmp;
/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
ssize = mmu_kernel_ssize;
}
- return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize);
+ tmp = cpumask_of(smp_processor_id());
+ if (cpumask_equal(mm_cpumask(mm), tmp))
+ local = 1;
+
+ return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
- int psize, int ssize)
+ int psize, int ssize, int local)
{
int i, index = 0;
unsigned long s_addr = addr;