powerpc/mm: Remove the dependency on pte bit position in asm code
author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
           Tue, 1 Dec 2015 03:36:44 +0000 (09:06 +0530)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Mon, 14 Dec 2015 04:19:10 +0000 (15:19 +1100)
The asm code should not depend on the PTE bit positions. Avoid that
dependency by moving the part of the fault path that builds the access
mask from asm to C.
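
For illustration, a minimal, self-contained C sketch of the access-flag
derivation that this patch moves out of asm. The macro values below are
made-up stand-ins for the sketch only; the real definitions live in the
powerpc headers, and their exact bit positions are what the removed asm had
hard-coded, while user_region stands in for the REGION_ID(ea) test:

    #include <stdio.h>

    /* Illustrative stand-in values only, not the kernel's definitions. */
    #define _PAGE_PRESENT   0x001UL
    #define _PAGE_USER      0x002UL
    #define _PAGE_RW        0x004UL
    #define _PAGE_EXEC      0x008UL
    #define DSISR_ISSTORE   0x02000000UL
    #define MSR_PR          0x4000UL

    /*
     * Derive the hash-fault access mask from the MSR, the DSISR and the
     * trap number, mirroring what __hash_page() now does in C.
     */
    static unsigned long access_flags(unsigned long msr, unsigned long dsisr,
                                      unsigned long trap, int user_region)
    {
            unsigned long access = _PAGE_PRESENT;

            if (dsisr & DSISR_ISSTORE)
                    access |= _PAGE_RW;
            if ((msr & MSR_PR) || user_region)
                    access |= _PAGE_USER;
            if (trap == 0x400)      /* instruction storage interrupt */
                    access |= _PAGE_EXEC;
            return access;
    }

    int main(void)
    {
            /* A userspace store fault: trap 0x300, DSISR store bit set. */
            printf("access = %#lx\n",
                   access_flags(MSR_PR, DSISR_ISSTORE, 0x300, 1));
            return 0;
    }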

Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/mm/hash_utils_64.c

index 1a03142a69fd12a426abe751c6e884c34685832e..3419cbf2ad5965d4f5858fdbc2020672ecd6c846 100644 (file)
@@ -1556,29 +1556,19 @@ do_hash_page:
        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
        bne     77f                     /* then don't call hash_page now */
-       /*
-        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-        * accessing a userspace segment (even from the kernel). We assume
-        * kernel addresses always have the high bit set.
-        */
-       rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
-       rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
-       orc     r0,r12,r0               /* MSR_PR | ~high_bit */
-       rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
-       ori     r4,r4,1                 /* add _PAGE_PRESENT */
-       rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
 
        /*
         * r3 contains the faulting address
-        * r4 contains the required access permissions
+        * r4 contains the MSR
         * r5 contains the trap number
         * r6 contains dsisr
         *
         * at return r3 = 0 for success, 1 for page fault, negative for error
         */
+        mr     r4,r12
        ld      r6,_DSISR(r1)
-       bl      hash_page               /* build HPTE if possible */
-       cmpdi   r3,0                    /* see if hash_page succeeded */
+       bl      __hash_page             /* build HPTE if possible */
+        cmpdi  r3,0                    /* see if __hash_page succeeded */
 
        /* Success */
        beq     fast_exc_return_irq     /* Return from exception on success */
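
For context, the hand-off above works because, under the 64-bit PowerPC ELF
ABI, the first integer arguments to a C function travel in r3 through r6, so
the asm only has to place the MSR in r4 and the DSISR in r6 before the call.
A sketch of the mapping at the `bl __hash_page` call site, with the prototype
as added in the hunk below:

    /*
     * Register-to-argument mapping at `bl __hash_page`
     * (64-bit ELF ABI: r3..r6 carry the first four integer arguments):
     *
     *   r3 = faulting address        -> ea
     *   r4 = saved MSR (copy of r12) -> msr
     *   r5 = trap number             -> trap
     *   r6 = DSISR from the stack    -> dsisr
     */
    int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
                    unsigned long dsisr);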
index 995809911f17610dbc831a946f0304f1ea329765..30b7648e687a5d7c8f0a784f09a13266ba877cb5 100644 (file)
@@ -1206,6 +1206,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+               unsigned long dsisr)
+{
+       unsigned long access = _PAGE_PRESENT;
+       unsigned long flags = 0;
+       struct mm_struct *mm = current->mm;
+
+       if (REGION_ID(ea) == VMALLOC_REGION_ID)
+               mm = &init_mm;
+
+       if (dsisr & DSISR_NOHPTE)
+               flags |= HPTE_NOHPTE_UPDATE;
+
+       if (dsisr & DSISR_ISSTORE)
+               access |= _PAGE_RW;
+       /*
+        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+        * accessing a userspace segment (even from the kernel). We assume
+        * kernel addresses always have the high bit set.
+        */
+       if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+               access |= _PAGE_USER;
+
+       if (trap == 0x400)
+               access |= _PAGE_EXEC;
+
+       return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
 {