/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>
int __read_mostly pat_wc_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
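/*
 * Note: with the early_param() hook above, booting with "nopat" on the
 * kernel command line calls pat_disable() before pat_init() runs, so the
 * PAT MSR is left untouched and all of the tracking below short-circuits.
 */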
static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x,y)	((u64)PAT_ ## y << ((x)*8))
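/*
 * Worked example: PAT(1,WC) expands to ((u64)PAT_WC << 8) == 0x100, i.e.
 * PAT entry 1 is programmed as Write-Combining. OR-ing eight such terms
 * builds the full 64-bit IA32_PAT MSR image, one byte per entry.
 */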
void pat_init(void)
{
	u64 pat;

	if (!pat_wc_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat) {
		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
		/*
		 * Panic if this happens on the secondary CPU, and we
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		BUG_ON(boot_pat_state);
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}
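/*
 * Worked example of the encoding above: a PTE requesting _PAGE_CACHE_WC
 * has PAT=0, PCD=0, PWT=1, which selects PAT entry 1 (index = PAT<<2 |
 * PCD<<1 | PWT); pat_init() programs entry 1 as Write-Combining, so the
 * mapping becomes WC.
 */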
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}
/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */
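/*
 * Minimal sketch of the tracked node, inferred from the field accesses
 * below (->start, ->end, ->type, ->nd); shown here so the list code
 * reads self-contained.
 */
struct memtype {
	u64			start;	/* physical start address */
	u64			end;	/* physical end address (exclusive) */
	unsigned long		type;	/* one of _PAGE_CACHE_* */
	struct list_head	nd;	/* node on memtype_list */
};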
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
			   unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
		*ret_prot = prot;
		return 0;
	}
	if (mtrr_type == 0xFE) {		/* MTRR match error */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}
	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
	    mtrr_type != MTRR_TYPE_WRBACK &&
	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);

	/* Currently doing intersection by hand. Optimize it later. */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
	} else if (pat_type == _PAGE_CACHE_UC ||
		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}
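/*
 * Illustrative intersections, following the logic above: a WB request
 * over an MTRR WC range comes back WC; a WB request over an MTRR UC
 * range comes back UC; WC, UC- and UC requests keep their PAT type
 * regardless of the (handled) MTRR type.
 */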
/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If ret_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If ret_type is non-null, the function
 * will return the available type in ret_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1) {
				*ret_type = _PAGE_CACHE_WB;
			} else {
				*ret_type = req_type;
			}
		}
		return 0;
	}
	/* Low ISA region is always mapped WB in page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}
	if (req_type == -1) {
		/*
		 * Special case where caller wants to inherit from mtrr or
		 * existing pat mapping, defaulting to UC_MINUS in case of
		 * no match.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == 0xFE) {	/* MTRR match error */
			err = -1;
		}

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	}
	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);
	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		if (parse->start >= end) {
			pr_debug("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}
		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}
			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}
			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}
		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}
			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(
		KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}
			/* pr_debug supplies its own level; no KERN_INFO here */
			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}
395 "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
396 start
, end
, cattr_name(new_entry
->type
),
397 cattr_name(req_type
));
399 spin_unlock(&memtype_lock
);
	if (new_entry) {
		/* No conflict. Not yet added to the list. Add to the tail */
		list_add_tail(&new_entry->nd, &memtype_list);
		pr_debug("New Entry\n");
	}

	if (ret_type) {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type), cattr_name(*ret_type));
	} else {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type));
	}

	spin_unlock(&memtype_lock);

	return err;
}
int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		return 0;
	}

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
		return 0;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
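/*
 * Usage sketch (illustrative; 'base' and 'len' are hypothetical): a
 * caller wanting a write-combining mapping of a physical range would
 * pair the two functions above roughly as:
 *
 *	unsigned long got;
 *
 *	if (reserve_memtype(base, base + len, _PAGE_CACHE_WC, &got))
 *		return -EINVAL;
 *
 * 'got' may be weaker than WC if the MTRRs or an existing mapping
 * conflict; the caller must create its mapping with 'got' and later
 * drop the reservation with free_memtype(base, base + len).
 */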
/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from the existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
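/*
 * Illustratively, the three cases above play out in
 * phys_mem_access_prot_allowed() below: open("/dev/mem", O_RDWR | O_SYNC)
 * yields flags = _PAGE_CACHE_UC and reserve_memtype(..., flags, NULL),
 * which fails outright on any conflict; without O_SYNC the '-1' inherit
 * mode is used and a conflicting mapping's type is adopted via 'flags'.
 */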
#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}
#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_wc_enabled &&
	    !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif
	/*
	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;
	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}
void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}