static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
-        return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0;
+        return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
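With STRICT_MM_TYPECHECKS, pgprot_t is a one-member struct rather than a bare unsigned long, so a plain 0 is not a valid value of the return type; both arms of the conditional must be pgprot_t. A minimal standalone sketch of the typed definitions (the VM_SAO/_PAGE_SAO values below are stand-ins for illustration, not the real bit assignments):

typedef struct { unsigned long pgprot; } pgprot_t;  /* STRICT_MM_TYPECHECKS form */
#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) })

#define VM_SAO          0x1UL   /* stand-in value, illustration only */
#define _PAGE_SAO       0x2UL   /* stand-in value, illustration only */

static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
{
        /* Both arms of ?: must now be pgprot_t; a bare 0 does not
         * implicitly convert to the struct type. */
        return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
}

With the non-typechecked definitions, where pgprot_t is a plain unsigned long, the old code happened to compile, which is why the mismatch went unnoticed.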
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-                        _PAGE_WRITETHRU | _PAGE_ENDIAN | \
-                        _PAGE_USER | _PAGE_ACCESSED | \
-                        _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
-                        _PAGE_EXEC | _PAGE_HWEXEC)
+#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+                        _PAGE_USER | _PAGE_ACCESSED | \
+                        _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+                        _PAGE_EXEC | _PAGE_HWEXEC)
/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
        pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+static inline pgprot_t pte_pgprot(pte_t pte)
{
-        return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+        return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}
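This pairs with the PAGE_PROT_BITS change above: the constant is consumed as a mask on raw PTE words, so it must stay a plain unsigned long, and pte_pgprot() masks before wrapping. A compile-only sketch of why the old expression cannot build once the types are structs (the mask value is a stand-in):

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;
#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) })
#define pte_val(x)      ((x).pte)

#define PAGE_PROT_BITS  0xfffUL /* stand-in mask, illustration only */

static inline pgprot_t pte_pgprot(pte_t pte)
{
        /* The old body, __pgprot(pte_val(pte)) & PAGE_PROT_BITS, applies
         * the binary & operator to a struct and no longer compiles.
         * Mask the raw word first, then wrap the result once. */
        return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}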
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
#else
- *ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
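Once pte_t is a struct, the fallback store cannot apply bitwise operators to *ptep and pte directly; the pattern is unwrap with pte_val(), combine, and rewrap with __pte(). A standalone sketch (set_pte_sketch and the _PAGE_HASHPTE value are illustrative, not the kernel's exact helper):

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)      ((x).pte)
#define __pte(x)        ((pte_t) { (x) })

#define _PAGE_HASHPTE   0x400UL /* stand-in value, illustration only */

/* Keep the hash-PTE tracking bit from the old entry, take everything
 * else from the new one. */
static inline void set_pte_sketch(pte_t *ptep, pte_t pte)
{
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));
}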
#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP
-#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
-                        _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-                        _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
-                        _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \
+                        _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+                        _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+                        _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8
#define _PTEIDX_GROUP_IX 0x7
        return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
        pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline unsigned long pte_pgprot(pte_t pte)
+static inline pgprot_t pte_pgprot(pte_t pte)
{
-        return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+        return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}
/* Atomic PTE updates */
                page = pte_page(pte);
                if (!page_cache_get_speculative(page))
                        return 0;
-                if (unlikely(pte != *ptep)) {
+                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(page);
                        return 0;
                }
                *nr -= refs;
                return 0;
        }
-        if (unlikely(pte != *ptep)) {
+        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                while (*nr) {
                        put_page(page);
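Structs also have no == or != in C, so the lockless gup fast path re-checks the PTE by comparing the underlying words. A sketch of the idiom (pte_changed is a hypothetical helper, not a kernel function):

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)      ((x).pte)

/* Hypothetical helper: has the PTE changed since it was sampled?
 * `old != *ptep` does not compile for struct types, so compare the
 * underlying words. */
static inline int pte_changed(pte_t old, const pte_t *ptep)
{
        return pte_val(old) != pte_val(*ptep);
}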
                pgd_t pgd = *pgdp;
                VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
-                pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd);
+                pr_debug(" %016lx: normal pgd %p\n", addr,
+                         (void *)pgd_val(pgd));
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
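Similarly, a struct cannot be cast to (void *) for the %p format, so the debug print unwraps the pgd with pgd_val() first. A userspace sketch of the constraint:

#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)      ((x).pgd)

int main(void)
{
        pgd_t pgd = { 0x12345678UL };

        /* printf("%p", (void *)pgd) would not compile: a struct
         * cannot be cast to a pointer.  Unwrap the word first. */
        printf("%p\n", (void *)pgd_val(pgd));
        return 0;
}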
void create_section_mapping(unsigned long start, unsigned long end)
{
        BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-                                PAGE_KERNEL, mmu_linear_psize,
+                                pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                 mmu_kernel_ssize));
}
                mtspr(SPRN_SDR1, _SDR1);
        }
-        prot = PAGE_KERNEL;
+        prot = pgprot_val(PAGE_KERNEL);
#ifdef CONFIG_DEBUG_PAGEALLOC
        linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
                        start, p, __pa(p));
                mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-                                          PAGE_KERNEL, mmu_vmemmap_psize,
-                                          mmu_kernel_ssize);
+                                          pgprot_val(PAGE_KERNEL),
+                                          mmu_vmemmap_psize, mmu_kernel_ssize);
                BUG_ON(mapped < 0);
        }
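The remaining hunks are all the same boundary fix: htab_bolt_mapping() and the local prot variable traffic in raw unsigned long flag words, so typed pgprot_t values are unwrapped with pgprot_val() at the call site. A sketch of the convention (bolt_mapping_sketch is a hypothetical stand-in for such an interface, and the PAGE_KERNEL bits are illustrative):

typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) })

#define PAGE_KERNEL     __pgprot(0x183UL)       /* stand-in bits, illustration only */

/* Hypothetical stand-in for an interface such as htab_bolt_mapping()
 * that takes its protection bits as a plain unsigned long. */
static int bolt_mapping_sketch(unsigned long prot)
{
        return prot ? 0 : -1;
}

static int map_kernel_range_sketch(void)
{
        /* Unwrap the typed value exactly at the untyped boundary. */
        return bolt_mapping_sketch(pgprot_val(PAGE_KERNEL));
}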