#include <linux/highmem.h>
#include <linux/module.h>

/*
 * Map a (possibly highmem) page into the kernel's permanent kmap area.
 * May sleep, so process context only.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

/*
 * Release a mapping taken with kmap().  BUGs in interrupt context,
 * where kmap()/kunmap() are never legal.
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
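
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs kmap() with kunmap() around a short access to a page that may
 * live in highmem.  The helper name is hypothetical; it assumes process
 * context, since kmap() may sleep.
 */
#if 0
static void example_zero_highpage(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep; never in IRQ context */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* unmap by page, not by address */
}
#endif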

/*
 * Warn, at most ten times, when an atomic kmap slot is used from a
 * context that does not match the slot's intended use (e.g. a KM_IRQ*
 * slot taken outside hardirq context).
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
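
/*
 * Illustrative sketch, not part of the original file: an atomic kmap is
 * legal in contexts that cannot sleep, but the mapping must be dropped
 * quickly; preemption stays disabled between map and unmap.  The helper
 * name is hypothetical, and KM_USER0 is just one per-CPU slot choice
 * from enum km_type.
 */
#if 0
static void example_clear_highpage_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);	/* no sleeping between map/unmap */
	kunmap_atomic(vaddr, KM_USER0);
}
#endif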

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
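
/*
 * Illustrative sketch, not part of the original file: copying one page
 * of memory that has no struct page (e.g. a region reserved away from
 * the page allocator) by raw pfn.  The helper name, pfn and buf are
 * assumptions of the example; the unmap goes through the normal
 * kunmap_atomic() with the same slot.
 */
#if 0
static void example_copy_from_pfn(unsigned long pfn, void *buf)
{
	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);

	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}
#endif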

/*
 * Translate an address returned by kmap_atomic() back to the struct
 * page it maps; lowmem addresses fall through to virt_to_page().
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);