/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

#define K(x) ((x) << (PAGE_SHIFT-10))
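
/*
 * Illustrative note (not from the original source): K() converts a
 * count of pages into kilobytes.  A page is (1 << PAGE_SHIFT) bytes
 * and a kilobyte is (1 << 10) bytes, so with a hypothetical
 * PAGE_SHIFT of 12 (4kB pages), K(x) == x << 2, e.g. K(100) == 400kB.
 */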

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       nr_swap_pages);

	for_each_zone(zone) {
		unsigned long flags, order, total = 0;
		long largest_order = -1;	/* signed, so "none" is valid */

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %lukB)\n",
		       zone_to_nid(zone), zone->name, K(total),
		       largest_order >= 0 ? K(1UL) << largest_order : 0);
	}
}
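
/*
 * Accounting note (my gloss, not original text): the buddy allocator
 * keeps nr_free blocks of 2^order pages on each free_area list, so
 * "nr << order" converts a list's block count into pages, and the
 * largest populated order bounds the biggest physically contiguous
 * free chunk reported above.
 */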

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
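
/*
 * Usage sketch (illustrative only, not from the original file): a
 * caller wanting a fixed kernel mapping for a device page might do
 *
 *	__set_fixmap(FIX_SOME_SLOT, page_to_phys(page), PAGE_KERNEL);
 *
 * where FIX_SOME_SLOT is a hypothetical enum fixed_addresses entry;
 * the mapping is then reachable at the compile-time-constant address
 * __fix_to_virt(FIX_SOME_SLOT).
 */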

#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
	/* Parenthesized so the mask applies to the offset, not the pointer. */
	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
		((pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK);
	return &pte[pte_index(address)];
}
#endif
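
/*
 * Note added for clarity (my reading of the surrounding code, not text
 * from the original file): pmd_ptfn() is the page-table frame number of
 * the L2 table, which need only be HV_PAGE_TABLE_ALIGN-aligned and so
 * may sit part way into a page.  Masking the shifted ptfn with
 * ~PAGE_MASK recovers the byte offset of the table within its page,
 * which is then added to the kmap_atomic() address of that page.
 */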

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	/*
	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
	 * and since we need some kind of lock here to avoid races.
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&pgd_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&pgd_lock, flags);
}
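
/*
 * Sketch of the caller pattern this is designed for (my summary, not
 * original text): code such as a hypothetical set_memory_*() helper
 * that rewrites a small kernel PTE would first call
 *
 *	shatter_huge_page(addr);
 *
 * and only then modify the (now small-page) PTE, so the shatter plus
 * global TLB flush cannot race with the PTE update.  Note the
 * check/lock/re-check sequence above: the huge-page test is repeated
 * under pgd_lock so two racing shatter calls cannot both populate
 * the pmd.
 */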

/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
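
/*
 * Illustrative note (an inference from the code, not original text):
 * pgd_index(PAGE_OFFSET) is the first pgd slot that maps kernel
 * addresses, so slots [0, KERNEL_PGD_INDEX_START) cover user space and
 * the KERNEL_PGD_PTRS slots above them cover the kernel.  pgd_ctor()
 * below zeroes the user part and copies the kernel part verbatim from
 * swapper_pg_dir, so every pgd shares the kernel mappings.
 */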
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}

#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
	int i;
#endif

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

#if L2_USER_PGTABLE_ORDER > 0
	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}
#endif

	pgtable_page_ctor(p);
	return p;
}
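
/*
 * Clarifying note (my reading of the code above, not original text):
 * on this port a user L2 page table can span 1 << L2_USER_PGTABLE_ORDER
 * pages, so the allocator gives each trailing page its own reference
 * count and NR_PAGETABLE accounting; pte_free() and __pte_free_tlb()
 * below must undo both, page by page.
 */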

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
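
/*
 * Worked example (illustrative; assumes the little-endian byte layout
 * the #error above enforces): if HV_PTE_INDEX_ACCESSED were 8, the
 * accessed bit is bit 0 of byte 1 of the PTE, so the code reads
 * tmp[1], tests 1 << 0, and writes back only that single byte.
 * Touching one byte narrows, but does not close, the race with a
 * concurrent hypervisor update to the rest of the PTE.
 */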

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
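
/*
 * Usage sketch (illustrative only): passing mm == NULL walks the
 * kernel's swapper_pg_dir, so e.g.
 *
 *	pte_t *ptep = virt_to_pte(NULL, (unsigned long)some_kernel_va);
 *
 * returns the kernel PTE for a lowmem address, or the pmd itself cast
 * to a pte_t * when the address is covered by a huge page.
 * (some_kernel_va is a hypothetical variable, not defined here.)
 */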

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
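
/*
 * Worked example (illustrative): on a hypothetical 8x8 mesh
 * (smp_width == 8), cpu 19 sits at (x, y) == (3, 2), since
 * 19 % 8 == 3 and 19 / 8 == 2; get_remote_cache_cpu() inverts this
 * with 3 + 2 * 8 == 19.  The two routines are exact inverses as long
 * as the lotar encodes a single tile.
 */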

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);

void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}
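
/*
 * Ordering note (my gloss on the code above, not original text): on
 * 32-bit tile the 64-bit PTE is written as two 32-bit halves, and the
 * "present" and "migrating" bits live in the low word.  Writing the
 * high word first when making a PTE present (and last when tearing
 * one down) means a concurrent hardware or hypervisor walk either
 * sees the PTE as non-present or sees a fully valid entry, never a
 * half-written one.
 */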

void set_pte(pte_t *ptep, pte_t pte)
{
	struct page *page = pfn_to_page(pte_pfn(pte));

	/* Update the home of a PTE if necessary */
	pte = pte_set_home(pte, page_home(page));

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; since we're in
 * interrupt context (servicing switch_mm) we can't block on it, so if
 * the trylock fails we simply leave the "priority_cached" field set.
 * Presumably we'll come back later and have more luck and clear the
 * value then; for now we just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}
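
/*
 * Summary sketch (my gloss, not original text): start_mm_caching()
 * latches priority_cached to -1U the first time a MAP_CACHE_PRIORITY
 * mapping appears, and update_priority_cached() only drops it back to
 * zero once a full VMA scan under mmap_sem finds no priority mappings
 * left, so hv_set_caching() is never turned off while a priority
 * mapping might still exist.
 */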

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
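
/*
 * Worked example (illustrative): a request for phys_addr 0x10000104
 * with size 8 yields offset == 0x104, a page-aligned phys_addr of
 * 0x10000000, and a size rounded up to one page, so the returned
 * cookie points 0x104 bytes into the new VA area, mirroring the
 * unaligned physical address.  (Assumes 4kB pages for the arithmetic;
 * the actual PAGE_SIZE on tile may differ.)
 */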

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel.  Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */