/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_MMU_CONTEXT_H
#define _ASM_TILE_MMU_CONTEXT_H

#include <linux/smp.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <asm-generic/mm_hooks.h>

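/*
 * This port keeps no per-mm context state: ASIDs are handed out
 * per-cpu at context-switch time (see switch_mm() below), so there is
 * nothing to initialize when a new mm is created.
 */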
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
        /* FIXME: DIRECTIO should not always be set. */
        int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
        if (rc < 0)
                panic("hv_install_context failed: %d", rc);
}

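/*
 * Install the given page table on this cpu.  The protection bits are
 * taken from the kernel PTE that maps the pgdir page itself, which
 * (presumably) makes the hypervisor access the page table with the
 * same caching/home attributes the kernel already uses for that page.
 */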
static inline void install_page_table(pgd_t *pgdir, int asid)
{
        pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
        __install_page_table(pgdir, asid, *ptep);
}

/*
 * "Lazy" TLB mode is entered when we are switching to a kernel task,
 * which borrows the mm of the previous task.  The goal of this
 * optimization is to avoid having to install a new page table.  On
 * early x86 machines (where the concept originated) you couldn't do
 * anything short of a full page table install for invalidation, so
 * handling a remote TLB invalidate required doing a page table
 * re-install.  Someone clearly decided that it was silly to keep
 * doing this while in "lazy" TLB mode, so the optimization involves
 * installing the swapper page table instead, the first time a remote
 * invalidate occurs, and clearing the cpu out of cpu_vm_mask, so the
 * cpu running the kernel task doesn't need to take any more
 * interrupts.  At that point it's then necessary to explicitly
 * reinstall the mm's page table when context switching back to the
 * original mm.
 *
 * On Tile, we have to do a page-table install whenever DMA is enabled,
 * so in that case lazy mode doesn't help anyway.  And more generally,
 * we have efficient per-page TLB shootdown, and don't expect to spend
 * that much time in kernel tasks in general, so just leaving the
 * kernel task borrowing the old page table, but handling TLB
 * shootdowns, is a reasonable thing to do.  And importantly, this
 * lets us use the hypervisor's internal APIs for TLB shootdown, which
 * means we don't have to worry about having TLB shootdowns blocked
 * when Linux is disabling interrupts; see the page migration code for
 * an example of where it's important for TLB shootdowns to complete
 * even when interrupts are disabled at the Linux level.
 */
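/*
 * For contrast, a conventional lazy-TLB hook in the style described
 * above might look roughly like the following (a hypothetical sketch
 * only; the helper name is made up and this port deliberately does
 * not do this):
 *
 *      static inline void enter_lazy_tlb(struct mm_struct *mm,
 *                                        struct task_struct *tsk)
 *      {
 *              mark_cpu_lazy_tlb(smp_processor_id(), mm);
 *      }
 *
 * with the remote-invalidate path then installing the swapper page
 * table on first interrupt, clearing the cpu out of cpu_vm_mask so no
 * further shootdowns are delivered, and reinstalling the mm's own
 * table only when switching back to it.  Tile instead keeps the cpu
 * in cpu_vm_mask and services each per-page shootdown as it arrives.
 */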
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
        /*
         * We have to do an "identity" page table switch in order to
         * clear any pending DMA interrupts.
         */
        if (current->thread.tile_dma_state.enabled)
                install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (likely(prev != next)) {

                int cpu = smp_processor_id();

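                /*
                 * ASIDs are handed out per-cpu by bumping a counter;
                 * once the counter wraps past max_asid we start
                 * reusing values, so the whole local TLB is flushed
                 * first to drop any stale translations still tagged
                 * with the recycled ASIDs.
                 */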
                /* Pick new ASID. */
                int asid = __get_cpu_var(current_asid) + 1;
                if (asid > max_asid) {
                        asid = min_asid;
                        local_flush_tlb();
                }
                __get_cpu_var(current_asid) = asid;

                /* Clear cpu from the old mm, and set it in the new one. */
                cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
                cpumask_set_cpu(cpu, &next->cpu_vm_mask);

                /* Re-load page tables */
                install_page_table(next->pgd, asid);

                /* See how we should set the red/black cache info */
                check_mm_caching(prev, next);

                /*
                 * Since we're changing to a new mm, we have to flush
                 * the icache in case some physical page now being mapped
                 * has subsequently been repurposed and has new code.
                 */
                __flush_icache();

        }
}

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        switch_mm(prev_mm, next_mm, NULL);
}

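/*
 * Nothing is allocated in init_new_context(), so destroying and
 * deactivating an mm are both no-ops on this port.
 */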
#define destroy_context(mm)             do { } while (0)
#define deactivate_mm(tsk, mm)          do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */