/*
 * arch/mips/mm/c-r4k.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h>	/* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
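	/*
	 * On CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC kernels the guard
	 * above compiles the cross-call away: all TCs share the primary
	 * cache, so the local call below already covers every CPU.
	 */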
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

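/*
 * bcops is the board-cache hook vector (an external, board-level cache
 * controller); it keeps pointing at the no-op implementation above until
 * a platform installs real operations.
 */
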
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
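
/*
 * The macro above implements the R4600 Hit-cacheop workarounds selected
 * in <asm/war.h>: a dummy uncached load for v2.x cores and a run of nops
 * for v1.x, presumably to drain the write buffer / pipeline before the
 * Hit cache op is issued.
 *
 * The r4k_blast_* entry points below are function pointers that the
 * *_setup() routines bind once at boot according to the probed cache
 * line size, so no per-call size checks are needed.
 */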

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

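/*
 * TX49 workaround (TX49XX_ICACHE_INDEX_INV_WAR): index-invalidate the
 * I-cache in two passes, each aligned so that the invalidating loop
 * itself never lives in the 1kB chunk it is currently invalidating.
 */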
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

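/*
 * Nothing can be left stale in a physically indexed, physically tagged
 * D-cache across a vunmap, so the flush below is only needed when the
 * D-cache has virtual aliases.
 */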
static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
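	/*
	 * A physically indexed D-cache selects its lines by physical
	 * address, so the indexed blast must be driven with paddr there;
	 * virtually indexed caches use the virtual address instead.
	 */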
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(paddr);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

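/*
 * The protected_blast_* variants used below differ from the plain ones
 * in that faults on bad addresses are fixed up rather than fatal, which
 * matters here because start/end may point into user space.
 */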
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  OTOH we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating
	 * the secondary cache will result in any entries in the primary
	 * caches also getting invalidated, which hopefully is a bit more
	 * economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

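	/* Finally push the range out of any board-level (bcache) cache. */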
	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31.  The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
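		/*
		 * Config1 field layout (MIPS32/MIPS64): IS at bits 24:22
		 * (sets per way = 64 << IS), IL at 21:19 (line size =
		 * 2 << IL bytes, 0 meaning no cache), IA at 18:16 (ways =
		 * IA + 1); DS/DL/DA describe the D-cache the same way at
		 * 15:13, 12:10 and 9:7, matching the shifts used below.
		 */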
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)	/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so they'd normally suffer
	 * from aliases, but magic in the hardware deals with that for us
	 * so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
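		/*
		 * Config7 bit 16 set on 24K/34K means the core removes
		 * virtual aliases in hardware; only when it is clear do we
		 * need the generic check.  Note the deliberate fall-through
		 * into the default: label below.
		 */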
		if (!(read_c0_config7() & (1 << 16)))
	default:
			if (c->dcache.waysize > PAGE_SIZE)
				c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
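	/*
	 * Index ops wrap modulo the cache size: the first begin + 2^k whose
	 * loaded tag reads back as the zero tag stored above aliases to the
	 * line at begin, so that offset is the S-cache size.
	 */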
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata say nothing about this bit, so we set it just
	 * in case for those revisions that require it to be set according
	 * to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
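	/*
	 * dcache.sets * dcache.linesz is the way size, i.e. the alias
	 * colour granularity, so shared mappings get aligned to it.
	 */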
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}