/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif
static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
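
/*
 * Worked example (illustrative, not from the original source): each PAT()
 * operand selects one byte of the IA32_PAT MSR, so PAT(1, WC) evaluates to
 * (u64)1 << 8 == 0x100. The full value built in pat_init() below,
 *
 *	PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
 *	PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC)
 *
 * therefore works out to 0x0007010600070106.
 */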
void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
	struct rb_node		rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
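
/*
 * Illustrative sketch (an assumption, not part of the original file): two
 * overlapping reservations of the same range show up as two adjacent list
 * entries with identical bounds and type, e.g.
 *
 *	{ .start = 0xd0000000, .end = 0xd0010000, .type = _PAGE_CACHE_WC }
 *	{ .start = 0xd0000000, .end = 0xd0010000, .type = _PAGE_CACHE_WC }
 *
 * which is how the duplicate-allowing sorted list implements reference
 * counting for aliased areas: each reservation must be freed individually
 * before the range stops being tracked.
 */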
static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (data->start < start) {
			last_lower = data;
			node = node->rb_right;
		} else if (data->start > start) {
			node = node->rb_left;
		} else
			return data;
	}

	/* Will return NULL if there is no entry with its start <= start */
	return last_lower;
}
static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct memtype *this = container_of(*new, struct memtype, rb);

		parent = *new;
		if (data->start <= this->start)
			new = &((*new)->rb_left);
		else if (data->start > this->start)
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->rb, parent, new);
	rb_insert_color(&data->rb, root);
}
/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type values used in pat and mtrr are not the same.)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
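
/*
 * Illustrative note (added commentary, not spelled out in the original): a
 * WB request for a range covered by a non-WB MTRR (say, a UC MTRR over a
 * device aperture) is downgraded to UC- here, so the effective type never
 * ends up more cacheable than what the MTRRs allow for that range.
 */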
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
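
/*
 * Illustrative note (added commentary, not in the original): the return
 * value is tri-state -- 1 when every page in the range is RAM, 0 when none
 * is, and -1 when the range mixes RAM and non-RAM pages, which
 * reserve_memtype() and free_memtype() below reject with -EINVAL.
 */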
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}
/*
 * req_type typically has one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	/* Only track when pat_enabled */
	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	memtype_rb_insert(&memtype_rbroot, new);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
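
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): a caller in the ioremap_wc() style would bracket the mapping
 * with a reserve/free pair, accepting a possible downgrade to UC-:
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &new_type))
 *		return NULL;
 *	... map the range using new_type ...
 *	free_memtype(phys, phys + size);	(on the unmap path)
 */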
int free_memtype(u64 start, u64 end)
{
	struct memtype *entry, *saved_entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = free_ram_pages_type(start, end);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, start);
	if (unlikely(entry == NULL))
		goto unlock_ret;

	/*
	 * Saved entry points to an entry with start same or less than what
	 * we searched for. Now go through the list in both directions to look
	 * for the entry that matches with both start and end, with list stored
	 * in sorted start address
	 */
	saved_entry = entry;
	list_for_each_entry_from(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start > start) {
			break;
		}
	}

	if (!err)
		goto unlock_ret;

	entry = saved_entry;
	list_for_each_entry_reverse(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start < start) {
			break;
		}
	}
unlock_ret:
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		spin_lock(&memtype_lock);
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		spin_unlock(&memtype_lock);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;

	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
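
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): an ioremap-style caller would pair the two helpers as
 *
 *	unsigned long type = _PAGE_CACHE_WC;
 *
 *	if (io_reserve_memtype(start, start + size, &type))
 *		return NULL;
 *	... create the mapping with the possibly-downgraded type ...
 *	io_free_memtype(start, start + size);	(on teardown)
 */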
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non RAM regions only and, after a successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
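
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): a framebuffer-style driver mapping its aperture write-combined
 * from its mmap handler might do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * and transparently gets an uncached mapping instead when PAT is disabled.
 */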
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}
static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};
static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}
static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR,
			    arch_debugfs_dir, NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
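
/*
 * Illustrative note (an assumption, not part of the original file):
 * reading the file prints one line per tracked range in the format used
 * by memtype_seq_show() above, e.g.
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	write-combining @ 0xd0000000-0xd0100000
 *	uncached-minus @ 0xfed00000-0xfed01000
 */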
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */