/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}
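
/*
 * Worked example (added note, not in the original source): with the PAT_*
 * values above, pat_init() programs
 *
 *	PAT0..PAT7 = WB, WC, UC-, UC, WB, WC, UC-, UC
 *	           =  6,  1,   7,  0,  6,  1,   7,  0
 *
 * i.e. MSR_IA32_CR_PAT = 0x0007010600070106, so a PTE's (PAT,PCD,PWT)
 * bits index this table directly: 000 -> WB, 001 -> WC, 010 -> UC-,
 * 011 -> UC, matching the encoding table in the comment above.
 */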

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so we keep track of them here.
 *
 * The list is sorted by start address and can contain multiple
 * entries for the same address (this allows reference counting of
 * overlapping areas). All aliases of an area have the same cache
 * attributes, of course. Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of the memtype range.
 *
 * memtype_lock protects both the linear list and the rbtree.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
	struct rb_node		rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (data->start < start) {
			last_lower = data;
			node = node->rb_right;
		} else if (data->start > start) {
			node = node->rb_left;
		} else
			return data;
	}

	/* Will return NULL if there is no entry with its start <= start */
	return last_lower;
}
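
/*
 * Illustrative example (not in the original source): with entries starting
 * at 0x1000 and 0x3000 in the tree, memtype_rb_search(root, 0x2000) returns
 * the 0x1000 entry (the rightmost entry with start < 0x2000), while a search
 * for 0x3000 returns the 0x3000 entry itself. Only a search below the lowest
 * start address in the tree returns NULL.
 */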

static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct memtype *this = container_of(*new, struct memtype, rb);

		parent = *new;
		if (data->start <= this->start)
			new = &((*new)->rb_left);
		else if (data->start > this->start)
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->rb, parent, new);
	rb_insert_color(&data->rb, root);
}

/*
 * Returns the intersection of the PAT memory type and the MTRR memory type,
 * expressed as a memory type that PAT understands.
 * (PAT and MTRR do not use the same numeric values for a given type.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM, vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
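
/*
 * Example (illustrative): a WB request for a range the MTRRs mark as
 * uncached (e.g. a BIOS tool mmap'ing reserved space through /dev/mem)
 * comes back as UC-, so the MTRRs can only weaken a WB request, never
 * the reverse. Any non-WB request is passed through unchanged.
 */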

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

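/*
 * Returns 1 if the whole range consists of RAM pages, 0 if none of it
 * does, and -1 if the range mixes RAM and non-RAM pages; a mixed range
 * cannot be tracked consistently, so callers treat -1 as an error.
 */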
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, the physical address range in the
		 * legacy ISA region is tracked as non-RAM. This allows
		 * users of /dev/mem to map portions of the legacy ISA
		 * region, even when some of those portions are listed
		 * (or not listed at all) with different e820 types
		 * (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for any conflicts
 * - In case of no conflicts, set the new memtype for the pages in the range
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in *new_type in case of no error. In case
 * of any error it will return a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	memtype_rb_insert(&memtype_rbroot, new);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
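
/*
 * Usage sketch (illustrative, not taken from a real caller): an ioremap-like
 * path would pair the calls as
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(phys, phys + size, _PAGE_CACHE_WC, &new_type))
 *		return NULL;
 *	... create the mapping with new_type, which pat_x_mtrr_type() or an
 *	existing alias may have downgraded from the requested WC ...
 *	free_memtype(phys, phys + size);	(on unmap)
 */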

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry, *saved_entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = free_ram_pages_type(start, end);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, start);
	if (unlikely(entry == NULL))
		goto unlock_ret;

	/*
	 * The search returned an entry whose start is the same as or less
	 * than the start we are looking for. Since the list is sorted by
	 * start address, now walk it in both directions to find the entry
	 * that matches both start and end.
	 */
	saved_entry = entry;
	list_for_each_entry_from(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start > start) {
			break;
		}
	}

	if (!err)
		goto unlock_ret;

	entry = saved_entry;
	list_for_each_entry_reverse(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start < start) {
			break;
		}
	}
unlock_ret:
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
		       current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		spin_lock(&memtype_lock);
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		spin_unlock(&memtype_lock);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}
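
/*
 * Example (illustrative): for a RAM page that was never passed to
 * reserve_ram_pages_type(), get_page_memtype() returns -1 and the lookup
 * reports WB; for a tracked non-RAM address the rbtree entry supplies the
 * type, and an untracked non-RAM address falls back to UC-.
 */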

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in the case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */


int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
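
/*
 * Example (illustrative): a process that opens /dev/mem with O_DSYNC and
 * mmap()s a page gets a UC- mapping here instead of the default WB; this
 * is how userspace asks for an uncached view of physical memory.
 */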

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}
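
/*
 * Why this matters (explanatory note): the kernel keeps a permanent
 * identity ("lowmem") mapping of RAM below high_memory. If a caller maps
 * part of that range with a different type, the same physical page would
 * be reachable through two virtual mappings with conflicting cache
 * attributes, so the identity mapping is retyped here to match.
 */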

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only and, after a successful reserve_memtype,
 * also keeps the identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			       "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the
 * prot from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by the vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
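
/*
 * Usage sketch (illustrative, not from a real driver): a driver exporting
 * a framebuffer-style BAR via mmap would typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * On non-PAT systems the helper quietly falls back to an uncached mapping.
 */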

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
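
/*
 * Example output (illustrative; actual entries and addresses vary), as
 * read from /sys/kernel/debug/x86/pat_memtype_list:
 *
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd1000000
 */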

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */