Commit | Line | Data |
---|---|---|
1da177e4 | 1 | #include <linux/highmem.h> |
129f6946 | 2 | #include <linux/module.h> |
867c5b52 | 3 | #include <linux/swap.h> /* for totalram_pages */ |
1da177e4 LT |
4 | |
5 | void *kmap(struct page *page) | |
6 | { | |
7 | might_sleep(); | |
8 | if (!PageHighMem(page)) | |
9 | return page_address(page); | |
10 | return kmap_high(page); | |
11 | } | |
12 | ||
/*
 * Release a mapping obtained with kmap().
 *
 * Must not be called from interrupt context: kunmap_high() may take
 * locks and wake up waiters on the kmap pool.  Lowmem pages have a
 * permanent mapping, so there is nothing to undo for them.
 */
void kunmap(struct page *page)
{
	/* Idiomatic kernel form of "if (in_interrupt()) BUG();". */
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
21 | ||
022eb434 AM |
/*
 * Sanity-check that the kmap slot 'type' matches the current execution
 * context (compiled in only with CONFIG_DEBUG_HIGHMEM).  Warns when an
 * interrupt-context slot is used from the wrong context, or a normal
 * slot is used from interrupt context.  Capped at 10 warnings total so
 * a recurring misuse does not flood the log.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Shared warning budget across all callers; not race-protected,
	 * which is fine for a best-effort debug aid. */
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			/* Hardirq context: only the IRQ-safe slots are legal. */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			/* Softirq context: softirq and IRQ slots are allowed. */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	/* Conversely: IRQ slots require interrupts to be disabled ... */
	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		/* ... and softirq slots require (soft)irq context or irqs off. */
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}
64 | ||
1da177e4 LT |
65 | /* |
66 | * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because | |
67 | * no global lock is needed and because the kmap code must perform a global TLB | |
68 | * invalidation when the kmap pool wraps. | |
69 | * | |
70 | * However when holding an atomic kmap it is not legal to sleep, so atomic | |
71 | * kmaps are appropriate for short, tight code paths only. | |
72 | */ | |
/*
 * Atomically map a page into a per-CPU fixmap slot with the given page
 * protection.  Never sleeps, so it is usable from atomic context; the
 * mapping is only valid on the current CPU and must be released with
 * kunmap_atomic() before the task can be migrated or preempted.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Lowmem pages already have a permanent kernel mapping. */
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	/* Each CPU owns a private window of KM_TYPE_NR fixmap slots. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* Slot must be empty; a live pte means a missing kunmap_atomic(). */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
94 | ||
ce6234b5 JF |
95 | void *kmap_atomic(struct page *page, enum km_type type) |
96 | { | |
97 | return kmap_atomic_prot(page, type, kmap_prot); | |
98 | } | |
99 | ||
1da177e4 LT |
/*
 * Release an atomic kmap obtained via kmap_atomic()/kmap_atomic_prot().
 * Clears and flushes the per-CPU fixmap pte (if one was used), then
 * re-enables pagefaults to balance the disable in kmap_atomic_prot().
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remap it. Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
		/* Lowmem address: nothing to tear down, but sanity-check it. */
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
123 | ||
bb6d59ca | 124 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
60e64d46 VG |
125 | { |
126 | enum fixed_addresses idx; | |
127 | unsigned long vaddr; | |
128 | ||
a866374a | 129 | pagefault_disable(); |
60e64d46 | 130 | |
bb6d59ca | 131 | idx = type + KM_TYPE_NR * smp_processor_id(); |
60e64d46 | 132 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
bb6d59ca | 133 | set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); |
49f19710 | 134 | arch_flush_lazy_mmu_mode(); |
60e64d46 VG |
135 | |
136 | return (void*) vaddr; | |
137 | } | |
bb6d59ca AM |
138 | |
139 | /* This is the same as kmap_atomic() but can map memory that doesn't | |
140 | * have a struct page associated with it. | |
141 | */ | |
142 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) | |
143 | { | |
144 | return kmap_atomic_prot_pfn(pfn, type, kmap_prot); | |
145 | } | |
d1d8c925 | 146 | EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ |
60e64d46 | 147 | |
1da177e4 LT |
148 | struct page *kmap_atomic_to_page(void *ptr) |
149 | { | |
150 | unsigned long idx, vaddr = (unsigned long)ptr; | |
151 | pte_t *pte; | |
152 | ||
153 | if (vaddr < FIXADDR_START) | |
154 | return virt_to_page(ptr); | |
155 | ||
156 | idx = virt_to_fix(vaddr); | |
157 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | |
158 | return pte_page(*pte); | |
159 | } | |
160 | ||
129f6946 AD |
/* Export the generic kmap entry points for modular users. */
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
867c5b52 | 165 | |
867c5b52 PE |
166 | void __init set_highmem_pages_init(void) |
167 | { | |
168 | struct zone *zone; | |
169 | int nid; | |
170 | ||
171 | for_each_zone(zone) { | |
172 | unsigned long zone_start_pfn, zone_end_pfn; | |
173 | ||
174 | if (!is_highmem(zone)) | |
175 | continue; | |
176 | ||
177 | zone_start_pfn = zone->zone_start_pfn; | |
178 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | |
179 | ||
180 | nid = zone_to_nid(zone); | |
181 | printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", | |
182 | zone->name, nid, zone_start_pfn, zone_end_pfn); | |
183 | ||
184 | add_highpages_with_active_regions(nid, zone_start_pfn, | |
185 | zone_end_pfn); | |
186 | } | |
187 | totalram_pages += totalhigh_pages; | |
188 | } |