#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
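
/*
 * Usage sketch, not part of the original file: kmap() may sleep, so it
 * is only legal in process context, and the mapping persists until the
 * matching kunmap(). The helper name below is an illustrative
 * assumption.
 */
static inline void example_copy_to_page(struct page *page,
                                        const void *src, size_t len)
{
        void *vaddr = kmap(page);       /* may sleep waiting for a free slot */

        /* assumes len <= PAGE_SIZE */
        memcpy(vaddr, src, len);
        kunmap(page);
}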

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        BUG_ON(!pte_none(*(kmap_pte-idx)));

        if (!PageHighMem(page))
                return page_address(page);

        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return kmap_atomic_prot(page, type, kmap_prot);
}
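
/*
 * Usage sketch, not part of the original file: a short, tight path that
 * zeroes a possibly-highmem page without sleeping. The helper name and
 * the KM_USER0 slot choice are illustrative assumptions.
 */
static inline void example_zero_page_atomic(struct page *page)
{
        char *vaddr = kmap_atomic(page, KM_USER0);

        /* must not sleep between kmap_atomic() and kunmap_atomic() */
        memset(vaddr, 0, PAGE_SIZE);
        kunmap_atomic(vaddr, KM_USER0);
}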

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it. Keeping stale mappings around is
         * also a bad idea, in case the page changes cacheability
         * attributes or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
        else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}
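
/*
 * Debug sketch, not part of the original file: after kunmap_atomic()
 * the fixmap pte of the slot should be empty again, mirroring the
 * BUG_ON() in kmap_atomic_prot(). The helper name is an illustrative
 * assumption; call it only where smp_processor_id() is stable.
 */
#ifdef CONFIG_DEBUG_HIGHMEM
static inline void example_check_slot_clear(enum km_type type)
{
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        BUG_ON(!pte_none(*(kmap_pte-idx)));
}
#endif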

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
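
/*
 * Usage sketch, not part of the original file: read one word from a
 * raw pfn that may have no struct page behind it. The helper name and
 * the KM_USER0 slot choice are illustrative assumptions.
 */
static inline unsigned long example_read_word_from_pfn(unsigned long pfn)
{
        unsigned long *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
        unsigned long val = *vaddr;

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}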

/* Convert an address obtained via kmap_atomic() back to its struct page. */
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
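
/*
 * Usage sketch, not part of the original file: recover the struct page
 * behind an atomic mapping; for a lowmem address kmap_atomic_to_page()
 * falls back to virt_to_page(). The helper name is an illustrative
 * assumption.
 */
static inline struct page *example_page_of_atomic(struct page *page)
{
        void *vaddr = kmap_atomic(page, KM_USER0);
        struct page *p = kmap_atomic_to_page(vaddr);    /* p == page */

        kunmap_atomic(vaddr, KM_USER0);
        return p;
}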

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);