arch/x86/mm/pat.c
1 /*
2 * Handle caching attributes in page tables (PAT)
3 *
4 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
5 * Suresh B Siddha <suresh.b.siddha@intel.com>
6 *
7 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
8 */
9
10 #include <linux/seq_file.h>
11 #include <linux/bootmem.h>
12 #include <linux/debugfs.h>
13 #include <linux/kernel.h>
14 #include <linux/gfp.h>
15 #include <linux/mm.h>
16 #include <linux/fs.h>
17
18 #include <asm/cacheflush.h>
19 #include <asm/processor.h>
20 #include <asm/tlbflush.h>
21 #include <asm/pgtable.h>
22 #include <asm/fcntl.h>
23 #include <asm/e820.h>
24 #include <asm/mtrr.h>
25 #include <asm/page.h>
26 #include <asm/msr.h>
27 #include <asm/pat.h>
28 #include <asm/io.h>
29
30 #ifdef CONFIG_X86_PAT
31 int __read_mostly pat_enabled = 1;
32
33 void __cpuinit pat_disable(char *reason)
34 {
35 pat_enabled = 0;
36 printk(KERN_INFO "%s\n", reason);
37 }
38
39 static int __init nopat(char *str)
40 {
41 pat_disable("PAT support disabled.");
42 return 0;
43 }
44 early_param("nopat", nopat);
45 #endif
46
47
48 static int debug_enable;
49
50 static int __init pat_debug_setup(char *str)
51 {
52 debug_enable = 1;
53 return 0;
54 }
55 __setup("debugpat", pat_debug_setup);
56
57 #define dprintk(fmt, arg...) \
58 do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
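/*
 * Note: dprintk() output only appears when the kernel is booted with
 * "debugpat" on the command line (see pat_debug_setup() above).
 */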
59
60
61 static u64 __read_mostly boot_pat_state;
62
63 enum {
64 PAT_UC = 0, /* uncached */
65 PAT_WC = 1, /* Write combining */
66 PAT_WT = 4, /* Write Through */
67 PAT_WP = 5, /* Write Protected */
68 PAT_WB = 6, /* Write Back (default) */
69 PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */
70 };
71
72 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
73
74 void pat_init(void)
75 {
76 u64 pat;
77
78 if (!pat_enabled)
79 return;
80
81 /* Paranoia check. */
82 if (!cpu_has_pat && boot_pat_state) {
83 /*
84 * If this happens we are on a secondary CPU, but
85 * switched to PAT on the boot CPU. We have no way to
86 * undo PAT.
87 */
88 printk(KERN_ERR "PAT enabled, "
89 "but not supported by secondary CPU\n");
90 BUG();
91 }
92
93 /* Set PWT to Write-Combining. All other bits stay the same */
94 /*
95 * PTE encoding used in Linux:
96 * PAT
97 * |PCD
98 * ||PWT
99 * |||
100 * 000 WB _PAGE_CACHE_WB
101 * 001 WC _PAGE_CACHE_WC
102 * 010 UC- _PAGE_CACHE_UC_MINUS
103 * 011 UC _PAGE_CACHE_UC
104 * PAT bit unused
105 */
106 pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
107 PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
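/*
 * Illustrative note: PAT(x, y) places the one-byte PAT_##y encoding into
 * byte x of the MSR value, with entry 0 in the least significant byte, so
 * the value programmed below works out to 0x0007010600070106.
 */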
108
109 /* Boot CPU check */
110 if (!boot_pat_state)
111 rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
112
113 wrmsrl(MSR_IA32_CR_PAT, pat);
114 printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
115 smp_processor_id(), boot_pat_state, pat);
116 }
117
118 #undef PAT
119
120 static char *cattr_name(unsigned long flags)
121 {
122 switch (flags & _PAGE_CACHE_MASK) {
123 case _PAGE_CACHE_UC: return "uncached";
124 case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
125 case _PAGE_CACHE_WB: return "write-back";
126 case _PAGE_CACHE_WC: return "write-combining";
127 default: return "broken";
128 }
129 }
130
131 /*
132 * The global memtype list keeps track of memory type for specific
133 * physical memory areas. Conflicting memory types in different
134 * mappings can cause CPU cache corruption. To avoid this we keep track.
135 *
136 * The list is sorted based on starting address and can contain multiple
137 * entries for each address (this allows reference counting for overlapping
138 * areas). All the aliases have the same cache attributes of course.
139 * Zero attributes are represented as holes.
140 *
141 * Currently the data structure is a list because the number of mappings
142 * is expected to be relatively small. If this should become a problem
143 * it could be changed to an rbtree or similar.
144 *
145 * memtype_lock protects the whole list.
146 */
147
148 struct memtype {
149 u64 start;
150 u64 end;
151 unsigned long type;
152 struct list_head nd;
153 };
154
155 static LIST_HEAD(memtype_list);
156 static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
157
158 /*
159 * Does the intersection of the PAT memory type and the MTRR memory type and
160 * returns the resulting memory type as PAT understands it.
161 * (The type encodings used by PAT and MTRR do not have the same values.)
162 * The intersection is based on the "Effective Memory Type" tables in the
163 * IA-32 SDM, vol. 3a.
164 */
165 static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
166 {
167 /*
168 * Look for an MTRR hint to get the effective type in the case where the
169 * PAT request is for WB.
170 */
171 if (req_type == _PAGE_CACHE_WB) {
172 u8 mtrr_type;
173
174 mtrr_type = mtrr_type_lookup(start, end);
175 if (mtrr_type == MTRR_TYPE_UNCACHABLE)
176 return _PAGE_CACHE_UC;
177 if (mtrr_type == MTRR_TYPE_WRCOMB)
178 return _PAGE_CACHE_WC;
179 }
180
181 return req_type;
182 }
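/*
 * Example (derived from the logic above): a _PAGE_CACHE_WB request over a
 * range the MTRRs mark UNCACHABLE resolves to _PAGE_CACHE_UC, over a
 * WRCOMB range it resolves to _PAGE_CACHE_WC, and any non-WB request is
 * passed through unchanged.
 */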
183
184 static int
185 chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
186 {
187 if (new->type != entry->type) {
188 if (type) {
189 new->type = entry->type;
190 *type = entry->type;
191 } else
192 goto conflict;
193 }
194
195 /* check overlaps with more than one entry in the list */
196 list_for_each_entry_continue(entry, &memtype_list, nd) {
197 if (new->end <= entry->start)
198 break;
199 else if (new->type != entry->type)
200 goto conflict;
201 }
202 return 0;
203
204 conflict:
205 printk(KERN_INFO "%s:%d conflicting memory types "
206 "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
207 new->end, cattr_name(new->type), cattr_name(entry->type));
208 return -EBUSY;
209 }
210
211 static struct memtype *cached_entry;
212 static u64 cached_start;
213
214 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
215 {
216 int ram_page = 0, not_rampage = 0;
217 unsigned long page_nr;
218
219 for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
220 ++page_nr) {
221 /*
222 * For legacy reasons, the physical address range in the legacy ISA
223 * region is tracked as non-RAM. This allows users of /dev/mem to
224 * map portions of the legacy ISA region, even when some of those
225 * portions are listed (or not even listed) with different e820
226 * types (RAM/reserved/...).
227 */
228 if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
229 page_is_ram(page_nr))
230 ram_page = 1;
231 else
232 not_rampage = 1;
233
234 if (ram_page == not_rampage)
235 return -1;
236 }
237
238 return ram_page;
239 }
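/*
 * Summary of the return convention above: 1 if every page in the range is
 * RAM, 0 if none of it is, and -1 if the range mixes the two (pages in the
 * legacy ISA region always count as non-RAM here).
 */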
240
241 /*
242 * For RAM pages, mark the pages as non-WB memory type using
243 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
244 * set_memory_wc() on a RAM page at a time before marking it as WB again.
245 * This is OK, because only one driver will own the page and do
246 * set_memory_*() calls.
247 *
248 * For now, we use PageNonWB to track that the RAM page is being mapped
249 * as non-WB. In the future, we will have to use one more flag
250 * (or some other mechanism in struct page) to distinguish between
251 * UC and WC mappings.
252 */
253 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
254 unsigned long *new_type)
255 {
256 struct page *page;
257 u64 pfn, end_pfn;
258
259 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
260 page = pfn_to_page(pfn);
261 if (page_mapped(page) || PageNonWB(page))
262 goto out;
263
264 SetPageNonWB(page);
265 }
266 return 0;
267
268 out:
269 end_pfn = pfn;
270 for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
271 page = pfn_to_page(pfn);
272 ClearPageNonWB(page);
273 }
274
275 return -EINVAL;
276 }
277
278 static int free_ram_pages_type(u64 start, u64 end)
279 {
280 struct page *page;
281 u64 pfn, end_pfn;
282
283 for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
284 page = pfn_to_page(pfn);
285 if (page_mapped(page) || !PageNonWB(page))
286 goto out;
287
288 ClearPageNonWB(page);
289 }
290 return 0;
291
292 out:
293 end_pfn = pfn;
294 for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
295 page = pfn_to_page(pfn);
296 SetPageNonWB(page);
297 }
298 return -EINVAL;
299 }
300
301 /*
302 * req_type typically has one of the following:
303 * - _PAGE_CACHE_WB
304 * - _PAGE_CACHE_WC
305 * - _PAGE_CACHE_UC_MINUS
306 * - _PAGE_CACHE_UC
307 *
308 * req_type can have the special case value '-1' when the requester wants to
309 * inherit the memory type from the MTRRs (if WB) or from an existing PAT
310 * entry, defaulting to UC_MINUS.
311 *
312 * If new_type is NULL, the function returns an error if it cannot reserve the
313 * region with req_type. If new_type is non-NULL, the function returns the
314 * available type in *new_type; on any error it returns a negative value.
315 */
316 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
317 unsigned long *new_type)
318 {
319 struct memtype *new, *entry;
320 unsigned long actual_type;
321 struct list_head *where;
322 int is_range_ram;
323 int err = 0;
324
325 BUG_ON(start >= end); /* end is exclusive */
326
327 if (!pat_enabled) {
328 /* This is identical to page table setting without PAT */
329 if (new_type) {
330 if (req_type == -1)
331 *new_type = _PAGE_CACHE_WB;
332 else
333 *new_type = req_type & _PAGE_CACHE_MASK;
334 }
335 return 0;
336 }
337
338 /* Low ISA region is always mapped WB in page table. No need to track */
339 if (is_ISA_range(start, end - 1)) {
340 if (new_type)
341 *new_type = _PAGE_CACHE_WB;
342 return 0;
343 }
344
345 if (req_type == -1) {
346 /*
347 * Call mtrr_lookup to get the type hint. This is an
348 * optimization for /dev/mem mmap'ers into WB memory (BIOS
349 * tools and ACPI tools). Use WB request for WB memory and use
350 * UC_MINUS otherwise.
351 */
352 u8 mtrr_type = mtrr_type_lookup(start, end);
353
354 if (mtrr_type == MTRR_TYPE_WRBACK)
355 actual_type = _PAGE_CACHE_WB;
356 else
357 actual_type = _PAGE_CACHE_UC_MINUS;
358 } else {
359 actual_type = pat_x_mtrr_type(start, end,
360 req_type & _PAGE_CACHE_MASK);
361 }
362
363 if (new_type)
364 *new_type = actual_type;
365
366 is_range_ram = pat_pagerange_is_ram(start, end);
367 if (is_range_ram == 1)
368 return reserve_ram_pages_type(start, end, req_type,
369 new_type);
370 else if (is_range_ram < 0)
371 return -EINVAL;
372
373 new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
374 if (!new)
375 return -ENOMEM;
376
377 new->start = start;
378 new->end = end;
379 new->type = actual_type;
380
381 spin_lock(&memtype_lock);
382
383 if (cached_entry && start >= cached_start)
384 entry = cached_entry;
385 else
386 entry = list_entry(&memtype_list, struct memtype, nd);
387
388 /* Search for existing mapping that overlaps the current range */
389 where = NULL;
390 list_for_each_entry_continue(entry, &memtype_list, nd) {
391 if (end <= entry->start) {
392 where = entry->nd.prev;
393 cached_entry = list_entry(where, struct memtype, nd);
394 break;
395 } else if (start <= entry->start) { /* end > entry->start */
396 err = chk_conflict(new, entry, new_type);
397 if (!err) {
398 dprintk("Overlap at 0x%Lx-0x%Lx\n",
399 entry->start, entry->end);
400 where = entry->nd.prev;
401 cached_entry = list_entry(where,
402 struct memtype, nd);
403 }
404 break;
405 } else if (start < entry->end) { /* start > entry->start */
406 err = chk_conflict(new, entry, new_type);
407 if (!err) {
408 dprintk("Overlap at 0x%Lx-0x%Lx\n",
409 entry->start, entry->end);
410 cached_entry = list_entry(entry->nd.prev,
411 struct memtype, nd);
412
413 /*
414 * Move to the right position in the linked
415 * list to add this new entry.
416 */
417 list_for_each_entry_continue(entry,
418 &memtype_list, nd) {
419 if (start <= entry->start) {
420 where = entry->nd.prev;
421 break;
422 }
423 }
424 }
425 break;
426 }
427 }
428
429 if (err) {
430 printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
431 "track %s, req %s\n",
432 start, end, cattr_name(new->type), cattr_name(req_type));
433 kfree(new);
434 spin_unlock(&memtype_lock);
435
436 return err;
437 }
438
439 cached_start = start;
440
441 if (where)
442 list_add(&new->nd, where);
443 else
444 list_add_tail(&new->nd, &memtype_list);
445
446 spin_unlock(&memtype_lock);
447
448 dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
449 start, end, cattr_name(new->type), cattr_name(req_type),
450 new_type ? cattr_name(*new_type) : "-");
451
452 return err;
453 }
454
455 int free_memtype(u64 start, u64 end)
456 {
457 struct memtype *entry;
458 int err = -EINVAL;
459 int is_range_ram;
460
461 if (!pat_enabled)
462 return 0;
463
464 /* Low ISA region is always mapped WB. No need to track */
465 if (is_ISA_range(start, end - 1))
466 return 0;
467
468 is_range_ram = pat_pagerange_is_ram(start, end);
469 if (is_range_ram == 1)
470 return free_ram_pages_type(start, end);
471 else if (is_range_ram < 0)
472 return -EINVAL;
473
474 spin_lock(&memtype_lock);
475 list_for_each_entry(entry, &memtype_list, nd) {
476 if (entry->start == start && entry->end == end) {
477 if (cached_entry == entry || cached_start == start)
478 cached_entry = NULL;
479
480 list_del(&entry->nd);
481 kfree(entry);
482 err = 0;
483 break;
484 }
485 }
486 spin_unlock(&memtype_lock);
487
488 if (err) {
489 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
490 current->comm, current->pid, start, end);
491 }
492
493 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
494
495 return err;
496 }
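/*
 * Usage sketch (illustrative, not taken from an in-tree caller): a driver
 * wanting an uncached view of a physical range would pair the two calls
 * above, e.g.:
 *
 *	unsigned long actual;
 *	int ret;
 *
 *	ret = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_UC_MINUS, &actual);
 *	if (ret)
 *		return ret;
 *	...
 *	free_memtype(paddr, paddr + size);
 *
 * Most callers reach this code indirectly through ioremap_*() and friends,
 * which do the reserve/free bookkeeping on their behalf.
 */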
497
498
499 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
500 unsigned long size, pgprot_t vma_prot)
501 {
502 return vma_prot;
503 }
504
505 #ifdef CONFIG_STRICT_DEVMEM
506 /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
507 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
508 {
509 return 1;
510 }
511 #else
512 /* This check is needed to avoid cache aliasing when PAT is enabled */
513 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
514 {
515 u64 from = ((u64)pfn) << PAGE_SHIFT;
516 u64 to = from + size;
517 u64 cursor = from;
518
519 if (!pat_enabled)
520 return 1;
521
522 while (cursor < to) {
523 if (!devmem_is_allowed(pfn)) {
524 printk(KERN_INFO
525 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
526 current->comm, from, to);
527 return 0;
528 }
529 cursor += PAGE_SIZE;
530 pfn++;
531 }
532 return 1;
533 }
534 #endif /* CONFIG_STRICT_DEVMEM */
535
536 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
537 unsigned long size, pgprot_t *vma_prot)
538 {
539 u64 offset = ((u64) pfn) << PAGE_SHIFT;
540 unsigned long flags = -1;
541 int retval;
542
543 if (!range_is_allowed(pfn, size))
544 return 0;
545
546 if (file->f_flags & O_SYNC) {
547 flags = _PAGE_CACHE_UC_MINUS;
548 }
549
550 #ifdef CONFIG_X86_32
551 /*
552 * On the PPro and successors, the MTRRs are used to set
553 * memory types for physical addresses outside main memory,
554 * so blindly setting UC or PWT on those pages is wrong.
555 * For Pentiums and earlier, the surround logic should disable
556 * caching for the high addresses through the KEN pin, but
557 * we maintain the tradition of paranoia in this code.
558 */
559 if (!pat_enabled &&
560 !(boot_cpu_has(X86_FEATURE_MTRR) ||
561 boot_cpu_has(X86_FEATURE_K6_MTRR) ||
562 boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
563 boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
564 (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
565 flags = _PAGE_CACHE_UC;
566 }
567 #endif
568
569 /*
570 * With O_SYNC, we can only take a UC_MINUS mapping. Fail if we cannot.
571 *
572 * Without O_SYNC, we want to get
573 * - WB for WB-able memory and no other conflicting mappings
574 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
575 * - Inherit from conflicting mappings otherwise
576 */
577 if (flags != -1) {
578 retval = reserve_memtype(offset, offset + size, flags, NULL);
579 } else {
580 retval = reserve_memtype(offset, offset + size, -1, &flags);
581 }
582
583 if (retval < 0)
584 return 0;
585
586 if (((pfn < max_low_pfn_mapped) ||
587 (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
588 ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
589 free_memtype(offset, offset + size);
590 printk(KERN_INFO
591 "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
592 current->comm, current->pid,
593 cattr_name(flags),
594 offset, (unsigned long long)(offset + size));
595 return 0;
596 }
597
598 *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
599 flags);
600 return 1;
601 }
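/*
 * In short: O_SYNC opens of /dev/mem get a UC_MINUS mapping, old 32-bit
 * CPUs without MTRRs get UC for high addresses, and everything else
 * inherits a type from the MTRRs / existing PAT entries via the
 * req_type == -1 path of reserve_memtype(). For pfns that are already in
 * the kernel identity mapping, that mapping is converted to match.
 */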
602
603 void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
604 {
605 unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
606 u64 addr = (u64)pfn << PAGE_SHIFT;
607 unsigned long flags;
608
609 reserve_memtype(addr, addr + size, want_flags, &flags);
610 if (flags != want_flags) {
611 printk(KERN_INFO
612 "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
613 current->comm, current->pid,
614 cattr_name(want_flags),
615 addr, (unsigned long long)(addr + size),
616 cattr_name(flags));
617 }
618 }
619
620 void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
621 {
622 u64 addr = (u64)pfn << PAGE_SHIFT;
623
624 free_memtype(addr, addr + size);
625 }
626
627 /*
628 * Internal interface to reserve a range of physical memory with prot.
629 * Reserves non-RAM regions only. After a successful reserve_memtype, this
630 * function also keeps the identity mapping (if any) in sync with the new prot.
631 */
632 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
633 int strict_prot)
634 {
635 int is_ram = 0;
636 int id_sz, ret;
637 unsigned long flags;
638 unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
639
640 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
641
642 /*
643 * reserve_pfn_range() doesn't support RAM pages.
644 */
645 if (is_ram != 0)
646 return -EINVAL;
647
648 ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
649 if (ret)
650 return ret;
651
652 if (flags != want_flags) {
653 if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
654 free_memtype(paddr, paddr + size);
655 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
656 " for %Lx-%Lx, got %s\n",
657 current->comm, current->pid,
658 cattr_name(want_flags),
659 (unsigned long long)paddr,
660 (unsigned long long)(paddr + size),
661 cattr_name(flags));
662 return -EINVAL;
663 }
664 /*
665 * We allow returning a different type than the one requested in
666 * the non-strict case.
667 */
668 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
669 (~_PAGE_CACHE_MASK)) |
670 flags);
671 }
672
673 /* Need to keep identity mapping in sync */
674 if (paddr >= __pa(high_memory))
675 return 0;
676
677 id_sz = (__pa(high_memory) < paddr + size) ?
678 __pa(high_memory) - paddr :
679 size;
680
681 if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
682 free_memtype(paddr, paddr + size);
683 printk(KERN_ERR
684 "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
685 "for %Lx-%Lx\n",
686 current->comm, current->pid,
687 cattr_name(flags),
688 (unsigned long long)paddr,
689 (unsigned long long)(paddr + size));
690 return -EINVAL;
691 }
692 return 0;
693 }
694
695 /*
696 * Internal interface to free a range of physical memory.
697 * Frees non-RAM regions only.
698 */
699 static void free_pfn_range(u64 paddr, unsigned long size)
700 {
701 int is_ram;
702
703 is_ram = pat_pagerange_is_ram(paddr, paddr + size);
704 if (is_ram == 0)
705 free_memtype(paddr, paddr + size);
706 }
707
708 /*
709 * track_pfn_vma_copy is called when a vma covering a pfnmap gets copied
710 * through copy_page_range().
711 *
712 * If the vma has a linear pfn mapping for the entire range, we get the prot
713 * from the pte and reserve the entire vma range with a single call to
714 * reserve_pfn_range(). Otherwise, we reserve the entire vma range by going
715 * through the PTEs page by page to get the physical address and protection.
716 */
717 int track_pfn_vma_copy(struct vm_area_struct *vma)
718 {
719 int retval = 0;
720 unsigned long i, j;
721 resource_size_t paddr;
722 unsigned long prot;
723 unsigned long vma_start = vma->vm_start;
724 unsigned long vma_end = vma->vm_end;
725 unsigned long vma_size = vma_end - vma_start;
726 pgprot_t pgprot;
727
728 if (!pat_enabled)
729 return 0;
730
731 if (is_linear_pfn_mapping(vma)) {
732 /*
733 * reserve the whole chunk covered by vma. We need the
734 * starting address and protection from pte.
735 */
736 if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
737 WARN_ON_ONCE(1);
738 return -EINVAL;
739 }
740 pgprot = __pgprot(prot);
741 return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
742 }
743
744 /* reserve entire vma page by page, using pfn and prot from pte */
745 for (i = 0; i < vma_size; i += PAGE_SIZE) {
746 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
747 continue;
748
749 pgprot = __pgprot(prot);
750 retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
751 if (retval)
752 goto cleanup_ret;
753 }
754 return 0;
755
756 cleanup_ret:
757 /* Reserve error: Cleanup partial reservation and return error */
758 for (j = 0; j < i; j += PAGE_SIZE) {
759 if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
760 continue;
761
762 free_pfn_range(paddr, PAGE_SIZE);
763 }
764
765 return retval;
766 }
767
768 /*
769 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
770 * for the physical range indicated by pfn and size.
771 *
772 * prot is passed in as a parameter for the new mapping. If the vma has a
773 * linear pfn mapping for the entire range, reserve the entire vma range with
774 * a single reserve_pfn_range() call.
775 * Otherwise, we look at the pfn and size and reserve only the specified
776 * range page by page.
777 *
778 * Note that this function can be called with the caller trying to map only a
779 * subrange/page inside the vma.
780 */
781 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
782 unsigned long pfn, unsigned long size)
783 {
784 int retval = 0;
785 unsigned long i, j;
786 resource_size_t base_paddr;
787 resource_size_t paddr;
788 unsigned long vma_start = vma->vm_start;
789 unsigned long vma_end = vma->vm_end;
790 unsigned long vma_size = vma_end - vma_start;
791
792 if (!pat_enabled)
793 return 0;
794
795 if (is_linear_pfn_mapping(vma)) {
796 /* reserve the whole chunk starting from vm_pgoff */
797 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
798 return reserve_pfn_range(paddr, vma_size, prot, 0);
799 }
800
801 /* reserve page by page using pfn and size */
802 base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
803 for (i = 0; i < size; i += PAGE_SIZE) {
804 paddr = base_paddr + i;
805 retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
806 if (retval)
807 goto cleanup_ret;
808 }
809 return 0;
810
811 cleanup_ret:
812 /* Reserve error: Cleanup partial reservation and return error */
813 for (j = 0; j < i; j += PAGE_SIZE) {
814 paddr = base_paddr + j;
815 free_pfn_range(paddr, PAGE_SIZE);
816 }
817
818 return retval;
819 }
820
821 /*
822 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
823 * It can be called for a specific region indicated by pfn and size, or for
824 * the entire vma (in which case size can be zero).
825 */
826 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
827 unsigned long size)
828 {
829 unsigned long i;
830 resource_size_t paddr;
831 unsigned long prot;
832 unsigned long vma_start = vma->vm_start;
833 unsigned long vma_end = vma->vm_end;
834 unsigned long vma_size = vma_end - vma_start;
835
836 if (!pat_enabled)
837 return;
838
839 if (is_linear_pfn_mapping(vma)) {
840 /* free the whole chunk starting from vm_pgoff */
841 paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
842 free_pfn_range(paddr, vma_size);
843 return;
844 }
845
846 if (size != 0 && size != vma_size) {
847 /* free page by page, using pfn and size */
848 paddr = (resource_size_t)pfn << PAGE_SHIFT;
849 for (i = 0; i < size; i += PAGE_SIZE) {
850 paddr = paddr + i;
851 free_pfn_range(paddr, PAGE_SIZE);
852 }
853 } else {
854 /* free entire vma, page by page, using the pfn from pte */
855 for (i = 0; i < vma_size; i += PAGE_SIZE) {
856 if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
857 continue;
858
859 free_pfn_range(paddr, PAGE_SIZE);
860 }
861 }
862 }
863
864 pgprot_t pgprot_writecombine(pgprot_t prot)
865 {
866 if (pat_enabled)
867 return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
868 else
869 return pgprot_noncached(prot);
870 }
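/*
 * Typical use (illustrative): a driver mapping a write-combinable region
 * applies this to the vma protections before remapping, e.g.
 * vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); when PAT is
 * disabled this automatically falls back to an uncached mapping.
 */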
871
872 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
873
874 /* get Nth element of the linked list */
875 static struct memtype *memtype_get_idx(loff_t pos)
876 {
877 struct memtype *list_node, *print_entry;
878 int i = 1;
879
880 print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
881 if (!print_entry)
882 return NULL;
883
884 spin_lock(&memtype_lock);
885 list_for_each_entry(list_node, &memtype_list, nd) {
886 if (pos == i) {
887 *print_entry = *list_node;
888 spin_unlock(&memtype_lock);
889 return print_entry;
890 }
891 ++i;
892 }
893 spin_unlock(&memtype_lock);
894 kfree(print_entry);
895
896 return NULL;
897 }
898
899 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
900 {
901 if (*pos == 0) {
902 ++*pos;
903 seq_printf(seq, "PAT memtype list:\n");
904 }
905
906 return memtype_get_idx(*pos);
907 }
908
909 static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
910 {
911 ++*pos;
912 return memtype_get_idx(*pos);
913 }
914
915 static void memtype_seq_stop(struct seq_file *seq, void *v)
916 {
917 }
918
919 static int memtype_seq_show(struct seq_file *seq, void *v)
920 {
921 struct memtype *print_entry = (struct memtype *)v;
922
923 seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
924 print_entry->start, print_entry->end);
925 kfree(print_entry);
926
927 return 0;
928 }
929
930 static struct seq_operations memtype_seq_ops = {
931 .start = memtype_seq_start,
932 .next = memtype_seq_next,
933 .stop = memtype_seq_stop,
934 .show = memtype_seq_show,
935 };
936
937 static int memtype_seq_open(struct inode *inode, struct file *file)
938 {
939 return seq_open(file, &memtype_seq_ops);
940 }
941
942 static const struct file_operations memtype_fops = {
943 .open = memtype_seq_open,
944 .read = seq_read,
945 .llseek = seq_lseek,
946 .release = seq_release,
947 };
948
949 static int __init pat_memtype_list_init(void)
950 {
951 debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
952 NULL, &memtype_fops);
953 return 0;
954 }
955
956 late_initcall(pat_memtype_list_init);
957
958 #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */