/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
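/*
 * For illustration: each PAT(x, y) term places the numeric encoding of
 * memory type y into byte x of the 64-bit IA32_PAT MSR image, e.g.
 *
 *	PAT(2, UC_MINUS) == (u64)7 << 16
 *
 * so ORing eight such terms, as pat_init() does below, programs all eight
 * PAT slots in a single value.
 */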
void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}
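/*
 * Note on the encoding above: for a 4K PTE the hardware forms the PAT slot
 * index as (PAT << 2) | (PCD << 1) | PWT.  With the value written by
 * pat_init(), a PTE with PCD=0 and PWT=1 (slot 1) therefore resolves to
 * write-combining, matching the _PAGE_CACHE_WC line in the table above.
 */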
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should be a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
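/*
 * For example, two /dev/mem mappers of the same MMIO page each get an
 * uncached-minus entry for that range; the two entries coexist in the
 * sorted list (acting as a reference count) and are dropped one at a time
 * by free_memtype().
 */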
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}
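/*
 * Example of the intersection above: a _PAGE_CACHE_WB request over a range
 * the MTRRs mark UNCACHABLE is degraded to _PAGE_CACHE_UC, and over a
 * WRCOMB range to _PAGE_CACHE_WC; any non-WB request is passed through
 * unchanged.
 */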
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}
static struct memtype *cached_entry;
static u64 cached_start;
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will own the page and
 * do the set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	/* Roll back: clear the flag on the pages marked so far */
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	/* Roll back: restore the flag on the pages cleared so far */
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}

	return -EINVAL;
}
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK)
			actual_type = _PAGE_CACHE_WB;
		else
			actual_type = _PAGE_CACHE_UC_MINUS;
	} else {
		actual_type = pat_x_mtrr_type(start, end,
					      req_type & _PAGE_CACHE_MASK);
	}

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type,
					      new_type);
	else if (is_range_ram < 0)
		return -EINVAL;

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end   = end;
	new->type  = actual_type;

	spin_lock(&memtype_lock);

	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
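/*
 * Typical use (hypothetical driver code; bar_base and bar_len are placeholder
 * names) when mapping an MMIO region write-combined:
 *
 *	unsigned long got;
 *
 *	if (reserve_memtype(bar_base, bar_base + bar_len,
 *			    _PAGE_CACHE_WC, &got))
 *		return -EBUSY;
 *	... map the range using the type actually granted in 'got' ...
 *	free_memtype(bar_base, bar_base + bar_len);
 */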
int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	else if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = -1;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (((pfn < max_low_pfn_mapped) ||
	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}
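/*
 * map_devmem() and unmap_devmem() are meant to be used as a pair over the
 * same pfn/size: the former records the memtype reservation for the mapped
 * range (warning if the granted type differs from the requested one), the
 * latter drops that reservation when the mapping goes away.
 */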
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype,
 * this func also keeps the identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int id_sz, ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages.
	 */
	if (is_ram != 0)
		return -EINVAL;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	/* Need to keep identity mapping in sync */
	if (paddr >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < paddr + size) ?
				__pa(high_memory) - paddr :
				size;

	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size));
		return -EINVAL;
	}
	return 0;
}
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}
/*
 * track_pfn_vma_copy is called when the vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call. Otherwise, we reserve the entire vma range by
 * going through the PTEs page by page to get physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;
	pgprot_t pgprot;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	/* reserve entire vma page by page, using pfn and prot from pte */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		pgprot = __pgprot(prot);
		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
			continue;

		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}
/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with the caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		paddr = base_paddr + j;
		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}
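/*
 * track_pfn_vma_new() is the hook used by the generic pfn-remapping path
 * (e.g. remap_pfn_range()) when such a mapping is first set up; like
 * track_pfn_vma_copy() above, it unwinds any per-page reservations already
 * made if a later page in the range fails to reserve.
 */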
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	unsigned long i;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}

	if (size != 0 && size != vma_size) {
		/* free page by page, using pfn and size */
		base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
		for (i = 0; i < size; i += PAGE_SIZE) {
			paddr = base_paddr + i;
			free_pfn_range(paddr, PAGE_SIZE);
		}
	} else {
		/* free entire vma, page by page, using the pfn from pte */
		for (i = 0; i < vma_size; i += PAGE_SIZE) {
			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
				continue;

			free_pfn_range(paddr, PAGE_SIZE);
		}
	}
}
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
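/*
 * Example (hypothetical driver code; fb_vma is a placeholder name) of
 * requesting a write-combining user mapping for a frame buffer:
 *
 *	fb_vma->vm_page_prot = pgprot_writecombine(fb_vma->vm_page_prot);
 *
 * With PAT disabled this quietly falls back to an uncached protection.
 */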
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}
static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};
static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}
static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
				NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
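/*
 * Reading <debugfs>/x86/pat_memtype_list yields one line per tracked range,
 * for example (addresses illustrative only):
 *
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd1000000
 */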
#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */