/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

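/*
 * Register access helpers: each PMB slot has an entry in the address
 * array at PMB_ADDR and one in the data array at PMB_DATA, selected by
 * shifting the entry number into the PMB_E field.
 */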
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

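/*
 * Find the first free slot in the PMB map and mark it as in use.
 * Returns the slot number, or -ENOSPC if all entries are taken.
 * The caller must hold pmb_rwlock for writing.
 */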
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

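/*
 * Reserve a PMB slot (either the requested one, or any free slot when
 * entry == PMB_NO_ENTRY) and initialize the corresponding software
 * entry. The hardware entry is not touched here; that happens later
 * via __set_pmb_entry().
 */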
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

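/*
 * Invalidate a single hardware entry by clearing the V bit in both the
 * address and data arrays.
 */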
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

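/*
 * Establish a bolted (non-TLB) mapping of at least 16MB. The range is
 * carved up greedily using the largest PMB entry size that still fits,
 * and the resulting entries are linked together so that they can be
 * torn down as a unit later on.
 */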
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys += pmbe->size;
			vaddr += pmbe->size;
			size -= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

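/*
 * ioremap() back-end for large mappings: only used when "pmb=iomap" is
 * given on the command line. Reserves a virtual area (currently in the
 * 0xb0000000..P3SEG window) and bolts the physical range into it with
 * pmb_bolt_mapping(), returning an address adjusted for the offset of
 * the original physical address within the PMB-sized alignment.
 */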
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

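/*
 * Tear down the mapping whose virtual base address matches 'addr'.
 * Returns 0 on success or -EINVAL if no PMB entry covers that address.
 */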
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

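/*
 * Walk an entry chain and release up to 'depth' linked entries,
 * clearing the hardware slots and freeing the software state as we go.
 * Callers must hold pmb_rwlock for writing.
 */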
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

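/*
 * Try to fold a chain of contiguous entries headed by 'head' into a
 * single larger entry, covering as much of the span as maps onto a
 * supported PMB size. If no larger supported size fits, the chain is
 * left untouched.
 */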
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

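/*
 * Shrink a boot loader provided uncached mapping down to the 16MB the
 * kernel expects, updating both the PMB entry and the uncached range
 * bookkeeping via uncached_resize().
 */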
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

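/*
 * Late boot PMB setup: import the mappings already programmed into the
 * hardware, coalesce compound mappings where possible, trim the
 * uncached mapping, and then flush the TLB so the new state is visible.
 */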
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

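/*
 * debugfs dump of the hardware PMB state: one line per entry showing
 * the valid bit, the top byte of the VPN and PPN, the entry size and
 * the caching attributes.
 */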
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety vpn ppn size flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);

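/*
 * On resume from hibernation the PMB contents are lost, so replay every
 * software entry back into the hardware once the PM core reports the
 * PM_EVENT_ON transition that follows a freeze.
 */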
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif