brgez,a,pn %g5, tsb_do_fault
TSB_STORE(%g1, %g7)
- /* If it is larger than the base page size, don't
- * bother putting it into the TSB.
- */
- sethi %hi(_PAGE_ALL_SZ_BITS), %g7
- ldx [%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
- and %g5, %g7, %g2
- sethi %hi(_PAGE_SZBITS), %g7
- ldx [%g7 + %lo(_PAGE_SZBITS)], %g7
- cmp %g2, %g7
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- bne,a,pn %xcc, tsb_tlb_reload
- TSB_STORE(%g1, %g7)
-
TSB_WRITE(%g1, %g5, %g6)
/* Finally, load TLB and return from trap. */
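For reference, a minimal C-level sketch of what the deleted assembly above used to do (illustration only, not kernel code). Only the names and the control flow are taken from the hunk; the macro values below are hypothetical placeholders so the snippet compiles: if the PTE's size-field bits did not match the base-page-size encoding, the TSB slot was stamped with an invalid tag and the handler went straight to the TLB reload path instead of writing a real TSB entry.

#include <stdio.h>

#define PAGE_ALL_SZ_BITS	(0x3UL << 61)	/* hypothetical mask of the PTE size field */
#define PAGE_SZBITS		(0x0UL << 61)	/* hypothetical base-page-size encoding */
#define TSB_TAG_INVALID_BIT	46		/* assumed value; the hunk only names the bit */

struct tsb_entry { unsigned long tag, pte; };

static void old_tsb_fill(struct tsb_entry *ent, unsigned long tag, unsigned long pte)
{
	if ((pte & PAGE_ALL_SZ_BITS) != PAGE_SZBITS) {	/* larger than the base page size? */
		ent->tag = 1UL << TSB_TAG_INVALID_BIT;	/* TSB_STORE(%g1, %g7): invalidate the slot */
		return;					/* branch to tsb_tlb_reload */
	}
	ent->tag = tag;					/* TSB_WRITE(%g1, %g5, %g6) */
	ent->pte = pte;
}

int main(void)
{
	struct tsb_entry e = { 0, 0 };

	old_tsb_fill(&e, 0x1234UL, PAGE_SZBITS | 0x1000UL);
	printf("tag=%#lx pte=%#lx\n", e.tag, e.pte);
	return 0;
}

With the check removed, the handler falls through to TSB_WRITE for every PTE, matching the C hunk below.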
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
+	struct tsb *tsb;
+	unsigned long tag;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
	}

	mm = vma->vm_mm;
-	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
-		struct tsb *tsb;
-		unsigned long tag;
-
-		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL);
-		tsb_insert(tsb, tag, pte_val(pte));
-	}
+	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+			       (mm->context.tsb_nentries - 1UL)];
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, pte_val(pte));
}
void flush_dcache_page(struct page *page)
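The C hunk mirrors the assembly change: update_mmu_cache() now computes a TSB slot and tag and inserts the translation unconditionally. A standalone sketch of that indexing arithmetic follows (illustration only; tsb_slot_and_tag and the entry count are made up here, and the 8K base page, PAGE_SHIFT == 13, is an assumption):

#include <stdio.h>

#define PAGE_SHIFT	13UL	/* sparc64 base pages are 8K; assumed for this sketch */

/* Same arithmetic as the new update_mmu_cache() body: the virtual address
 * picks a slot by its page number modulo the (power-of-two) TSB size, and
 * the tag is the address shifted down by 22 bits, as in the hunk above. */
static void tsb_slot_and_tag(unsigned long address, unsigned long nentries,
			     unsigned long *slot, unsigned long *tag)
{
	*slot = (address >> PAGE_SHIFT) & (nentries - 1UL);
	*tag  = address >> 22UL;
}

int main(void)
{
	unsigned long slot, tag;

	tsb_slot_and_tag(0x0000070000aa2000UL, 512UL, &slot, &tag);
	printf("slot=%lu tag=%#lx\n", slot, tag);
	return 0;
}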