/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement an LRU usage.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected, just that the ASID comparison is not performed.
 *	-- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 *	-- Dan
 */

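/*
 * Concretely, this is why FIRST_CONTEXT below is 1 on the 4xx/BookE
 * style parts (context/TID 0 stays reserved for the kernel's shared
 * mappings, since a TID of zero matches any PID), but 0 on the 8xx,
 * whose kernel mappings are shared without consuming a context.
 */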
#ifdef CONFIG_8xx
#define NO_CONTEXT	16
#define LAST_CONTEXT	15
#define FIRST_CONTEXT	0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

#elif defined(CONFIG_E200) || defined(CONFIG_E500)
#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

#else
#error Unsupported processor type
#endif

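/*
 * Sizing note: context_map below needs LAST_CONTEXT / BITS_PER_LONG + 1
 * longs to hold one bit per context, i.e. a single long for the 16
 * contexts of the 8xx and, on these 32-bit parts, eight longs for the
 * 256-context platforms.
 */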
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
static atomic_t nr_free_contexts;
static struct mm_struct *context_mm[LAST_CONTEXT+1];
static void steal_context(void);

/* Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *	-- paulus
 */
static void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
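
/*
 * Note that destroy_context() both clears the victim's bit in
 * context_map and bumps nr_free_contexts, so the caller's
 * atomic_dec_if_positive() retry loop in get_mmu_context() below is
 * able to make progress after a successful steal.
 */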

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx;

	if (mm->context.id != NO_CONTEXT)
		return;

	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();

	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context.id = ctx;
	context_mm[ctx] = mm;
}
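
/*
 * Worked example (illustrative only): on the 8xx, with LAST_CONTEXT == 15,
 * suppose next_mmu_context is 14 and ids 14, 15 and 0 are taken while 3 is
 * the first free id.  test_and_set_bit(14) fails, find_next_zero_bit() runs
 * off the end of the map so ctx wraps to 0, the search restarts from there
 * and claims id 3, and next_mmu_context is left at (3 + 1) & 15 == 4.
 */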

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	get_mmu_context(next);

	set_context(next->context.id, next->pgd);
}
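
/*
 * Rough usage sketch (an assumption about the caller, not something
 * defined in this file): the arch switch_mm() path is expected to do
 * little more than call switch_mmu_context(prev, next) when switching
 * to a new address space, leaving both id allocation and the hardware
 * update (set_context) to the code above.
 */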

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	preempt_disable();
	if (mm->context.id != NO_CONTEXT) {
		clear_bit(mm->context.id, context_map);
		mm->context.id = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
	preempt_enable();
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
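	/*
	 * For example, with FIRST_CONTEXT == 1 this sets context_map[0]
	 * to 0x1 (id 0 reserved for the kernel) and nr_free_contexts to
	 * 255; with FIRST_CONTEXT == 0 on the 8xx nothing is reserved
	 * and all 16 contexts start out free.
	 */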
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}