/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */
8 | ||
9 | #include <linux/module.h> | |
10 | #include <asm/pgalloc.h> | |
11 | #include <asm/traps.h> | |
12 | ||
13 | ||
/*
 * Translate a virtual address to a physical address by querying the MMU
 * directly, one CPU flavour at a time.
 *
 * Returns the physical address, or 0 if no valid translation exists.
 */
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"	/* paddr written back into %0 */
			      ".chip 68k\n"
			      "2:\n"
			      /* fixup: on access error return 0 (clear %0, resume at 2:) */
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		/* PTESTR probes the translation; the result (resident bit plus
		 * physical page frame) lands in the MMUSR register. */
		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		/* Resident?  MMUSR holds the page frame; splice in the offset. */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		/* 020/030: PTESTR leaves the descriptor address in an address
		 * register and the MMU status in the PSR, which we store to
		 * mmusr via PMOVE. */
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
		/* Invalid descriptor, bus error, or limit violation: no mapping. */
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		/* The descriptor address is physical; map it so we can read it. */
		descaddr = phys_to_virt((unsigned long)descaddr);
		/* MMU_NUM = number of table levels used; the fewer the levels,
		 * the larger the region one descriptor covers, so mask
		 * accordingly (32MB, 256KB, or one page). */
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	/* NOTE(review): falls through here for an unhandled MMU_NUM value
	 * (or a non-resident 040 page) — treated as "no translation". */
	return 0;
}
72 | ||
73 | /* Push n pages at kernel virtual address and clear the icache */ | |
74 | /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ | |
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * Flush the instruction cache for the virtual range [address, endaddr).
 *
 * On 040/060 the caches are physically indexed, so each page is first
 * translated with virt_to_phys_slow() and pushed/invalidated (both
 * caches, %bc) with CPUSHP.  On older CPUs the whole instruction cache
 * is invalidated by setting FLUSH_I in the CACR.
 */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{

	if (CPU_IS_040_OR_060) {
		/* Round down to a page boundary; CPUSHP works per page. */
		address &= PAGE_MASK;

		do {
			/* nop precedes cpushp to force pending writes out
			 * before the push (write-buffer ordering). */
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		/* 020/030: no per-page op — invalidate the entire icache. */
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
EXPORT_SYMBOL(flush_icache_range);
99 | ||
/*
 * Flush the instruction cache after user-visible code has been written
 * into @page (e.g. by ptrace or signal delivery).
 *
 * NOTE(review): @vma, @addr and @len are unused here — on 040/060 the
 * entire page containing the modified code is pushed/invalidated via
 * CPUSHP on its physical address, and on older CPUs the whole icache is
 * invalidated, so finer granularity is unnecessary.  Presumably @len
 * never spans beyond this single page — confirm against callers.
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_040_OR_060) {
		/* nop drains the write buffer before the push. */
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		/* 020/030: invalidate the whole instruction cache (FLUSH_I). */
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
118 |