powerpc: Rename USER_ESID_BITS* to ESID_BITS*
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Wed, 13 Mar 2013 03:34:55 +0000 (03:34 +0000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Sun, 17 Mar 2013 01:45:44 +0000 (12:45 +1100)
We now use ESID_BITS of the kernel address to build the proto-VSID,
so the USER_ prefix no longer applies. Rename USER_ESID_BITS to
ESID_BITS and USER_ESID_BITS_1T to ESID_BITS_1T.

Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: <stable@vger.kernel.org> [v3.8]
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb_low.S
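
For context, the construction these renamed constants describe reduces to
the sketch below (a minimal illustration, not part of the patch; the helper
name proto_vsid_256m is made up, the real code is get_vsid() in the first
hunk). With CONTEXT_BITS = 19 and ESID_BITS = 18 the proto-VSID is 37 bits
wide, and each context maps 2^18 segments of 2^28 bytes, i.e. 2^46 bytes
(64TB).

	#define SID_SHIFT	28	/* 256MB segment: ESID = ea >> 28 */
	#define ESID_BITS	18

	/* Illustrative only: mirrors the (context << ESID_BITS) | esid
	 * construction that get_vsid() performs before the VSID scramble. */
	static unsigned long proto_vsid_256m(unsigned long context, unsigned long ea)
	{
		return (context << ESID_BITS) | (ea >> SID_SHIFT);
	}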

index a32461f9d8256ec0833c15208899272642ef2ee8..b59e06f507ea6c3f3af39688752e20a23096b666 100644 (file)
@@ -378,12 +378,12 @@ extern void slb_set_size(u16 size);
  */
 
 #define CONTEXT_BITS           19
-#define USER_ESID_BITS         18
-#define USER_ESID_BITS_1T      6
+#define ESID_BITS              18
+#define ESID_BITS_1T           6
 
 /*
  * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
  * available for user + kernel mapping. The top 4 contexts are used for
  * kernel mapping. Each segment contains 2^28 bytes. Each
  * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
@@ -396,15 +396,15 @@ extern void slb_set_size(u16 size);
  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
  */
 #define VSID_MULTIPLIER_256M   ASM_CONST(12538073)     /* 24-bit prime */
-#define VSID_BITS_256M         (CONTEXT_BITS + USER_ESID_BITS)
+#define VSID_BITS_256M         (CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M      ((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T     ASM_CONST(12538073)     /* 24-bit prime */
-#define VSID_BITS_1T           (CONTEXT_BITS + USER_ESID_BITS_1T)
+#define VSID_BITS_1T           (CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T                ((1UL<<VSID_BITS_1T)-1)
 
 
-#define USER_VSID_RANGE        (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE        (1UL << (ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -540,9 +540,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
                return 0;
 
        if (ssize == MMU_SEGSIZE_256M)
-               return vsid_scramble((context << USER_ESID_BITS)
+               return vsid_scramble((context << ESID_BITS)
                                     | (ea >> SID_SHIFT), 256M);
-       return vsid_scramble((context << USER_ESID_BITS_1T)
+       return vsid_scramble((context << ESID_BITS_1T)
                             | (ea >> SID_SHIFT_1T), 1T);
 }
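
The scramble applied on top of this (ASM_VSID_SCRAMBLE in the assembly
hunks below) is, per the multiplier/modulus definitions above, a
multiply-and-mod; a minimal C sketch assuming that form, with the 256M
constants substituted (the kernel's vsid_scramble() macro is the
authoritative version):

	/* Sketch only, assuming the multiply-and-mod form implied by the
	 * VSID_MULTIPLIER_256M / VSID_MODULUS_256M definitions above. */
	static unsigned long vsid_scramble_256m(unsigned long proto_vsid)
	{
		/* VSID_BITS_256M = CONTEXT_BITS + ESID_BITS = 19 + 18 = 37 */
		return (proto_vsid * 12538073UL) % ((1UL << 37) - 1);
	}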
 
index b112359ea7a82f8df6cdc31e8ac4791d89631967..200afa5bcfb73da1efa111c5b22ad62b09b3bb74 100644 (file)
@@ -1472,7 +1472,7 @@ _GLOBAL(do_stab_bolted)
        addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
 
        srdi    r10,r11,SID_SHIFT
-       rldimi  r10,r9,USER_ESID_BITS,0 /* proto vsid */
+       rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
        ASM_VSID_SCRAMBLE(r10, r9, 256M)
        rldic   r9,r10,12,16    /* r9 = vsid << 12 */
 
index ead58e3172945d81e863ba937cc1d88b93922558..5d7d29a313eb99d8581f7bf2e515421020628fbe 100644 (file)
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
        vcpu3s->context_id[0] = err;
 
        vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-                                 << USER_ESID_BITS) - 1;
-       vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+                                 << ESID_BITS) - 1;
+       vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
        kvmppc_mmu_hpte_init(vcpu);
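
Each host context therefore still owns a contiguous block of
2^ESID_BITS = 2^18 proto-VSIDs, from context_id[0] << ESID_BITS up to
((context_id[0] + 1) << ESID_BITS) - 1, exactly as computed above; only
the constant's name changes.
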
index e212a271c7a4bdadac512efd24da89de6c9ddd77..654258f165aeb9496ae96131c0f9b383927e8d20 100644 (file)
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
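
Sanity check on the bound: with ESID_BITS = 18 and SID_SHIFT = 28,
1UL << (ESID_BITS + SID_SHIFT) = 1UL << 46 = 64TB, matching the
64TB-per-context figure in the mmu-hash64.h comment.
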
index 77aafaa1ab097886f30506ade05fb1c00d57f441..17aa6dfceb3498cbb0a09302485bdf1ab8049cbf 100644 (file)
@@ -232,7 +232,7 @@ _GLOBAL(slb_allocate_user)
  * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
-       rldimi  r10,r9,USER_ESID_BITS,0
+       rldimi  r10,r9,ESID_BITS,0
        ASM_VSID_SCRAMBLE(r10,r9,256M)
        /*
         * bits above VSID_BITS_256M need to be ignored from r10
@@ -301,7 +301,7 @@ _GLOBAL(slb_compare_rr_to_size)
  */
 slb_finish_load_1T:
        srdi    r10,r10,(SID_SHIFT_1T - SID_SHIFT)      /* get 1T ESID */
-       rldimi  r10,r9,USER_ESID_BITS_1T,0
+       rldimi  r10,r9,ESID_BITS_1T,0
        ASM_VSID_SCRAMBLE(r10,r9,1T)
        /*
         * bits above VSID_BITS_1T need to be ignored from r10
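
The 1T path mirrors the 256M one with the narrower field; a minimal sketch
under the same caveats (helper name made up; SID_SHIFT_1T = 40 for 1TB
segments):

	#define SID_SHIFT_1T	40	/* 1TB segment: 1T ESID = ea >> 40 */
	#define ESID_BITS_1T	6

	/* Illustrative only: mirrors slb_finish_load_1T before the scramble;
	 * VSID_BITS_1T = CONTEXT_BITS + ESID_BITS_1T = 19 + 6 = 25 bits. */
	static unsigned long proto_vsid_1t(unsigned long context, unsigned long ea)
	{
		return (context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T);
	}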