#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

void get_new_mmu_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
void smp_new_mmu_context_version(void);
#else
#define smp_new_mmu_context_version() do { } while (0)
#endif

int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

void __tsb_context_switch(unsigned long pgd_pa,
			  struct tsb_config *tsb_base,
			  struct tsb_config *tsb_huge,
			  unsigned long tsb_descr_pa);

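/* Point the trap-time TSB walkers at this mm's TSB(s).  The second,
 * huge-page TSB is passed down only when it has actually been
 * allocated.
 */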
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[0],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
			     (mm->context.tsb_block[1].tsb ?
			      &mm->context.tsb_block[1] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[0]));
}

void tsb_grow(struct mm_struct *mm,
	      unsigned long tsb_index,
	      unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
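/* The stxa below is written for sun4u (ASI_DMMU); the entry in the
 * .sun4v_1insn_patch section lets the boot code rewrite the
 * instruction at label 661 to use ASI_MMU on sun4v.
 */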
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu;

	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0				CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via enter_lazy_tlb
	 *					run address space A
	 *					set cpu1's bit in cpu_vm_mask
	 *					flush_tlb_pending()
	 *					reset cpu_vm_mask to just cpu1
	 *					TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		cpumask_set_cpu(cpu, mm_cpumask(mm));
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
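/* Unlike switch_mm(), this runs when a completely new address space is
 * installed (e.g. at exec time), so the secondary context load and the
 * local TLB flush are performed unconditionally.
 */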
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_set_cpu(cpu, mm_cpumask(mm));

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
153 | ||
154 | #endif /* !(__ASSEMBLY__) */ | |
155 | ||
156 | #endif /* !(__SPARC64_MMU_CONTEXT_H) */ |