[MIPS] Fix FIXADDR_TOP for TX39/TX49.
arch/mips/mm/c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h>	/* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

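	/*
	 * On CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC kernels the virtual
	 * processors share a single primary cache, so the cross-CPU call
	 * below is compiled out and the direct local call is sufficient.
	 */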
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

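/*
 * R4600 v1.x and v2.x hit-type cache ops need a workaround: v2.x cores
 * get an uncached CKSEG1 load first so that outstanding memory accesses
 * are retired, v1.x cores get a few nops to pad out the pipeline.
 */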
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

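/*
 * Each of the _setup() routines below binds its function pointer once,
 * at init time, to the blast variant matching the probed line size, so
 * the flush fast paths never have to re-examine the cache geometry.
 */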
static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

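/*
 * Rationale for the alignment: a cache32_unroll32() loop body covers at
 * most 32 x 32 = 1024 bytes of instructions, so code aligned to a 1kB
 * (or 2kB) boundary is guaranteed to live entirely inside a single 1kB
 * chunk.  The TX49 workarounds below exploit this: each half of the
 * blast only invalidates the chunks the code is not executing from.
 */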
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)no_sc_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
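		/*
		 * A physically indexed D-cache has to be indexed with the
		 * physical address, hence paddr rather than addr below.
		 */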
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

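	/*
	 * The protected_blast_* variants below use exception-protected
	 * cache ops, so an address that faults (e.g. an unmapped user
	 * page) is skipped instead of taking the kernel down.
	 */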
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

/*
 * Ok, this seriously sucks.  We use this function to flush a user page
 * but don't know the virtual address, so we have to blast away the whole
 * icache which is significantly more expensive than the real thing.  Otoh
 * we at least know the kernel address of the page so we can flush it
 * selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got the
	 * choice of either invalidating the entire primary and secondary
	 * caches or invalidating the secondary caches also.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating the
	 * secondary cache will result in any entries in the primary caches
	 * also getting invalidated which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no I-cache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}


#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
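	/*
	 * MIPS 4K cores can do a spurious I-cache refill here; invalidating
	 * the I-cache line holding the code at label 1 below, padded with a
	 * few nops, works around that refill erratum.
	 */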
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0; 	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0; 	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

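		/*
		 * Config1 field layout on MIPS32/MIPS64: IL (I-cache line
		 * size) lives in bits 21:19, IS (sets per way) in 24:22 and
		 * IA (associativity - 1) in 18:16; the D-cache fields DL,
		 * DS and DA sit at bits 12:10, 15:13 and 9:7 respectively.
		 */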
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so normally they'd suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
		if (!(read_c0_config7() & (1 << 16)))
	default:
			if (c->dcache.waysize > PAGE_SIZE)
				c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

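	/*
	 * The sizing trick below: prime cache lines at begin + 64kB,
	 * 128kB, 256kB, ... with valid tags, then store an all-zero
	 * (invalid) tag at 'begin'.  Reading the index tags back at
	 * increasing powers of two, the first address whose tag reads
	 * back zero shares an S-cache index with 'begin', so its
	 * distance from 'begin' is the S-cache size.
	 */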
	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32R1 ||
	     c->isa_level == MIPS_CPU_ISA_M64R1) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

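	/*
	 * An R4000SC/R4400SC-style external cache enforces inclusion of
	 * the primary caches; the flush routines above rely on this to
	 * get away with flushing only the S-cache on such systems.
	 */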
	c->options |= MIPS_CPU_SUBSET_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * Au1100 errata actually keeps silence about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);

	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_icache_page	= r4k_flush_icache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}