x86, bts: DS and BTS initialization
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / blackfin / kernel / cplb-nompu / cplbinit.c
1 /*
2 * Blackfin CPLB initialization
3 *
4 * Copyright 2004-2007 Analog Devices Inc.
5 *
6 * Bugs: Enter bugs at http://blackfin.uclinux.org/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see the file COPYING, or write
20 * to the Free Software Foundation, Inc.,
21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23 #include <linux/module.h>
24
25 #include <asm/blackfin.h>
26 #include <asm/cacheflush.h>
27 #include <asm/cplb.h>
28 #include <asm/cplbinit.h>
29
#define CPLB_MEM CONFIG_MAX_MEM_SIZE

/*
 * Number of required data CPLB switchtable entries:
 * MEMSIZE / 4 (we mostly install 4M page size CPLBs)
 * approx 16 for smaller 1MB page size CPLBs for alignment purposes
 * 1 for L1 Data Memory
 * possibly 1 for L2 Data Memory
 * 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
 * 1 for ASYNC Memory
 * The trailing "* 2" accounts for each CPLB entry occupying two table
 * slots (address word + data word, see fill_cplbtab()).
 */
#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
				+ ASYNC_MEMORY_CPLB_COVERAGE) * 2)

/*
 * Number of required instruction CPLB switchtable entries:
 * MEMSIZE / 4 (we mostly install 4M page size CPLBs)
 * approx 12 for smaller 1MB page size CPLBs for alignment purposes
 * 1 for L1 Instruction Memory
 * possibly 1 for L2 Instruction Memory
 * 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
 * The trailing "* 2" accounts for each CPLB entry occupying two table
 * slots (address word + data word, see fill_cplbtab()).
 */
#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
53
54
/* Initial (boot-time) CPLB tables; one extra slot for the -1 terminator
 * written by generate_cplb_tables(). */
u_long icplb_table[MAX_CPLBS + 1];
u_long dcplb_table[MAX_CPLBS + 1];

/* Optionally place the switch tables in L1 data SRAM. */
#ifdef CONFIG_CPLB_SWITCH_TAB_L1
# define PDT_ATTR __attribute__((l1_data))
#else
# define PDT_ATTR
#endif

/* Runtime switch (page descriptor) tables; also -1 terminated. */
u_long ipdt_table[MAX_SWITCH_I_CPLBS + 1] PDT_ATTR;
u_long dpdt_table[MAX_SWITCH_D_CPLBS + 1] PDT_ATTR;

#ifdef CONFIG_CPLB_INFO
/* Per-entry swap counters -- presumably consumed by the CPLB_INFO
 * reporting code elsewhere; not touched in this file. */
u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS] PDT_ATTR;
u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS] PDT_ATTR;
#endif

/* Working handles onto the four output tables filled by
 * generate_cplb_tables(). */
struct s_cplb {
	struct cplb_tab init_i;
	struct cplb_tab init_d;
	struct cplb_tab switch_i;
	struct cplb_tab switch_d;
};
78
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
/*
 * Descriptor list of every memory region that may need CPLB coverage.
 * Fields marked "dynamic" are patched by generate_cplb_tables() before
 * the tables are filled.
 */
static struct cplb_desc cplb_data[] = {
	{
		.start = 0,
		.end = SIZE_1K,
		.psize = SIZE_1K,
		.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
		.i_conf = SDRAM_OOPS,
		.d_conf = SDRAM_OOPS,
		/* Guard page is only installed when NULL-pointer hunting
		 * is configured. */
#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
		.valid = 1,
#else
		.valid = 0,
#endif
		.name = "Zero Pointer Guard Page",
	},
	{
		.start = L1_CODE_START,
		.end = L1_CODE_START + L1_CODE_LENGTH,
		.psize = SIZE_4M,
		.attr = INITIAL_T | SWITCH_T | I_CPLB,
		.i_conf = L1_IMEMORY,
		.d_conf = 0,
		.valid = 1,
		.name = "L1 I-Memory",
	},
	{
		.start = L1_DATA_A_START,
		.end = L1_DATA_B_START + L1_DATA_B_LENGTH,
		.psize = SIZE_4M,
		.attr = INITIAL_T | SWITCH_T | D_CPLB,
		.i_conf = 0,
		.d_conf = L1_DMEMORY,
#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
		.valid = 1,
#else
		.valid = 0,
#endif
		.name = "L1 D-Memory",
	},
	{
		/* .end is patched to memory_end (or the end of the mtd
		 * area for ROMFS) at runtime. */
		.start = 0,
		.end = 0,	/* dynamic */
		.psize = 0,
		.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
		.i_conf = SDRAM_IGENERIC,
		.d_conf = SDRAM_DGENERIC,
		.valid = 1,
		.name = "Kernel Memory",
	},
	{
		/* i_conf is set even though attr lacks I_CPLB: I_CPLB is
		 * OR'ed into attr at runtime for CONFIG_ROMFS_FS. */
		.start = 0,	/* dynamic */
		.end = 0,	/* dynamic */
		.psize = 0,
		.attr = INITIAL_T | SWITCH_T | D_CPLB,
		.i_conf = SDRAM_IGENERIC,
		.d_conf = SDRAM_DNON_CHBL,
		.valid = 1,
		.name = "uClinux MTD Memory",
	},
	{
		.start = 0,	/* dynamic */
		.end = 0,	/* dynamic */
		.psize = SIZE_1M,
		.attr = INITIAL_T | SWITCH_T | D_CPLB,
		.d_conf = SDRAM_DNON_CHBL,
		.valid = 1,
		.name = "Uncached DMA Zone",
	},
	{
		.start = 0,	/* dynamic */
		.end = 0,	/* dynamic */
		.psize = 0,
		.attr = SWITCH_T | D_CPLB,
		.i_conf = 0,	/* dynamic */
		.d_conf = 0,	/* dynamic */
		.valid = 1,
		.name = "Reserved Memory",
	},
	{
		.start = ASYNC_BANK0_BASE,
		.end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
		.psize = 0,
		.attr = SWITCH_T | D_CPLB,
		.d_conf = SDRAM_EBIU,
		.valid = 1,
		.name = "Asynchronous Memory Banks",
	},
	{
		.start = L2_START,
		.end = L2_START + L2_LENGTH,
		.psize = SIZE_1M,
		.attr = SWITCH_T | I_CPLB | D_CPLB,
		.i_conf = L2_IMEMORY,
		.d_conf = L2_DMEMORY,
		.valid = (L2_LENGTH > 0),
		.name = "L2 Memory",
	},
	{
		.start = BOOT_ROM_START,
		.end = BOOT_ROM_START + BOOT_ROM_LENGTH,
		.psize = SIZE_1M,
		.attr = SWITCH_T | I_CPLB | D_CPLB,
		.i_conf = SDRAM_IGENERIC,
		.d_conf = SDRAM_DGENERIC,
		.valid = 1,
		.name = "On-Chip BootROM",
	},
};
188
189 static u16 __init lock_kernel_check(u32 start, u32 end)
190 {
191 if ((end <= (u32) _end && end >= (u32)_stext) ||
192 (start <= (u32) _end && start >= (u32)_stext))
193 return IN_KERNEL;
194 return 0;
195 }
196
197 static unsigned short __init
198 fill_cplbtab(struct cplb_tab *table,
199 unsigned long start, unsigned long end,
200 unsigned long block_size, unsigned long cplb_data)
201 {
202 int i;
203
204 switch (block_size) {
205 case SIZE_4M:
206 i = 3;
207 break;
208 case SIZE_1M:
209 i = 2;
210 break;
211 case SIZE_4K:
212 i = 1;
213 break;
214 case SIZE_1K:
215 default:
216 i = 0;
217 break;
218 }
219
220 cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
221
222 while ((start < end) && (table->pos < table->size)) {
223
224 table->tab[table->pos++] = start;
225
226 if (lock_kernel_check(start, start + block_size) == IN_KERNEL)
227 table->tab[table->pos++] =
228 cplb_data | CPLB_LOCK | CPLB_DIRTY;
229 else
230 table->tab[table->pos++] = cplb_data;
231
232 start += block_size;
233 }
234 return 0;
235 }
236
237 static unsigned short __init
238 close_cplbtab(struct cplb_tab *table)
239 {
240
241 while (table->pos < table->size) {
242
243 table->tab[table->pos++] = 0;
244 table->tab[table->pos++] = 0; /* !CPLB_VALID */
245 }
246 return 0;
247 }
248
249 /* helper function */
250 static void __init
251 __fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
252 {
253 if (cplb_data[i].psize) {
254 fill_cplbtab(t,
255 cplb_data[i].start,
256 cplb_data[i].end,
257 cplb_data[i].psize,
258 cplb_data[i].i_conf);
259 } else {
260 #if defined(CONFIG_BFIN_ICACHE)
261 if (ANOMALY_05000263 && i == SDRAM_KERN) {
262 fill_cplbtab(t,
263 cplb_data[i].start,
264 cplb_data[i].end,
265 SIZE_4M,
266 cplb_data[i].i_conf);
267 } else
268 #endif
269 {
270 fill_cplbtab(t,
271 cplb_data[i].start,
272 a_start,
273 SIZE_1M,
274 cplb_data[i].i_conf);
275 fill_cplbtab(t,
276 a_start,
277 a_end,
278 SIZE_4M,
279 cplb_data[i].i_conf);
280 fill_cplbtab(t, a_end,
281 cplb_data[i].end,
282 SIZE_1M,
283 cplb_data[i].i_conf);
284 }
285 }
286 }
287
288 static void __init
289 __fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
290 {
291 if (cplb_data[i].psize) {
292 fill_cplbtab(t,
293 cplb_data[i].start,
294 cplb_data[i].end,
295 cplb_data[i].psize,
296 cplb_data[i].d_conf);
297 } else {
298 fill_cplbtab(t,
299 cplb_data[i].start,
300 a_start, SIZE_1M,
301 cplb_data[i].d_conf);
302 fill_cplbtab(t, a_start,
303 a_end, SIZE_4M,
304 cplb_data[i].d_conf);
305 fill_cplbtab(t, a_end,
306 cplb_data[i].end,
307 SIZE_1M,
308 cplb_data[i].d_conf);
309 }
310 }
311
312 void __init generate_cplb_tables(void)
313 {
314
315 u16 i, j, process;
316 u32 a_start, a_end, as, ae, as_1m;
317
318 struct cplb_tab *t_i = NULL;
319 struct cplb_tab *t_d = NULL;
320 struct s_cplb cplb;
321
322 printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
323
324 cplb.init_i.size = MAX_CPLBS;
325 cplb.init_d.size = MAX_CPLBS;
326 cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
327 cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
328
329 cplb.init_i.pos = 0;
330 cplb.init_d.pos = 0;
331 cplb.switch_i.pos = 0;
332 cplb.switch_d.pos = 0;
333
334 cplb.init_i.tab = icplb_table;
335 cplb.init_d.tab = dcplb_table;
336 cplb.switch_i.tab = ipdt_table;
337 cplb.switch_d.tab = dpdt_table;
338
339 cplb_data[SDRAM_KERN].end = memory_end;
340
341 #ifdef CONFIG_MTD_UCLINUX
342 cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
343 cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
344 cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
345 # if defined(CONFIG_ROMFS_FS)
346 cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
347
348 /*
349 * The ROMFS_FS size is often not multiple of 1MB.
350 * This can cause multiple CPLB sets covering the same memory area.
351 * This will then cause multiple CPLB hit exceptions.
352 * Workaround: We ensure a contiguous memory area by extending the kernel
353 * memory section over the mtd section.
354 * For ROMFS_FS memory must be covered with ICPLBs anyways.
355 * So there is no difference between kernel and mtd memory setup.
356 */
357
358 cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;;
359 cplb_data[SDRAM_RAM_MTD].valid = 0;
360
361 # endif
362 #else
363 cplb_data[SDRAM_RAM_MTD].valid = 0;
364 #endif
365
366 cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
367 cplb_data[SDRAM_DMAZ].end = _ramend;
368
369 cplb_data[RES_MEM].start = _ramend;
370 cplb_data[RES_MEM].end = physical_mem_end;
371
372 if (reserved_mem_dcache_on)
373 cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
374 else
375 cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
376
377 if (reserved_mem_icache_on)
378 cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
379 else
380 cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
381
382 for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
383 if (!cplb_data[i].valid)
384 continue;
385
386 as_1m = cplb_data[i].start % SIZE_1M;
387
388 /* We need to make sure all sections are properly 1M aligned
389 * However between Kernel Memory and the Kernel mtd section, depending on the
390 * rootfs size, there can be overlapping memory areas.
391 */
392
393 if (as_1m && i != L1I_MEM && i != L1D_MEM) {
394 #ifdef CONFIG_MTD_UCLINUX
395 if (i == SDRAM_RAM_MTD) {
396 if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start)
397 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M;
398 else
399 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
400 } else
401 #endif
402 printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n",
403 cplb_data[i].name, cplb_data[i].start);
404 }
405
406 as = cplb_data[i].start % SIZE_4M;
407 ae = cplb_data[i].end % SIZE_4M;
408
409 if (as)
410 a_start = cplb_data[i].start + (SIZE_4M - (as));
411 else
412 a_start = cplb_data[i].start;
413
414 a_end = cplb_data[i].end - ae;
415
416 for (j = INITIAL_T; j <= SWITCH_T; j++) {
417
418 switch (j) {
419 case INITIAL_T:
420 if (cplb_data[i].attr & INITIAL_T) {
421 t_i = &cplb.init_i;
422 t_d = &cplb.init_d;
423 process = 1;
424 } else
425 process = 0;
426 break;
427 case SWITCH_T:
428 if (cplb_data[i].attr & SWITCH_T) {
429 t_i = &cplb.switch_i;
430 t_d = &cplb.switch_d;
431 process = 1;
432 } else
433 process = 0;
434 break;
435 default:
436 process = 0;
437 break;
438 }
439
440 if (!process)
441 continue;
442 if (cplb_data[i].attr & I_CPLB)
443 __fill_code_cplbtab(t_i, i, a_start, a_end);
444
445 if (cplb_data[i].attr & D_CPLB)
446 __fill_data_cplbtab(t_d, i, a_start, a_end);
447 }
448 }
449
450 /* close tables */
451
452 close_cplbtab(&cplb.init_i);
453 close_cplbtab(&cplb.init_d);
454
455 cplb.init_i.tab[cplb.init_i.pos] = -1;
456 cplb.init_d.tab[cplb.init_d.pos] = -1;
457 cplb.switch_i.tab[cplb.switch_i.pos] = -1;
458 cplb.switch_d.tab[cplb.switch_d.pos] = -1;
459
460 }
461
462 #endif
463