/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
/*
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement an LRU usage.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected, just that the ASID comparison is not performed.
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 */
#ifdef CONFIG_8xx
#define NO_CONTEXT		16
#define LAST_CONTEXT		15
#define FIRST_CONTEXT		0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT		256
#define LAST_CONTEXT		255
#define FIRST_CONTEXT		1

#elif defined(CONFIG_E200) || defined(CONFIG_E500)
#define NO_CONTEXT		256
#define LAST_CONTEXT		255
#define FIRST_CONTEXT		1

#else
#error Unsupported processor type
#endif
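/*
 * Allocator state: next_mmu_context is the next context number to
 * try (and the next victim for steal_context()), context_map holds
 * one bit per context (set means in use), nr_free_contexts counts
 * the clear bits, and context_mm maps each live context number back
 * to the mm_struct that owns it.
 */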
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
static atomic_t nr_free_contexts;
static struct mm_struct *context_mm[LAST_CONTEXT+1];
static void steal_context(void);
/* Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 */
static void steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	/* destroy_context() clears the victim's bit in context_map,
	 * marks the mm as having no context, and bumps the free count */
	destroy_context(mm);
}
/*
 * Get a new mmu context for the address space described by `mm'.
 */
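/*
 * Allocation works in two steps: first reserve a free slot by
 * decrementing nr_free_contexts (stealing a context in rotation if
 * none is free), then claim a specific context number by atomically
 * setting a clear bit in context_map, scanning from next_mmu_context
 * and wrapping past LAST_CONTEXT back to zero.
 */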
static inline void get_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx;

	/* Nothing to do if this mm already has a context */
	if (mm->context.id != NO_CONTEXT)
		return;

	/* Reserve a slot, evicting someone else's context if needed */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();

	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context.id = ctx;
	context_mm[ctx] = mm;
}
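/*
 * Switch the MMU over to a new address space: allocate a context on
 * first use, then program the hardware with the new context id and
 * page directory.
 */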
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	get_mmu_context(next);
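	/* set_context() is the processor-specific low-level hook
	 * (typically in the per-core head_*.S assembly) that loads the
	 * new context id into the hardware PID/ASID register and, where
	 * the core needs it, the new page directory pointer. */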
	set_context(next->context.id, next->pgd);
}
/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;
	return 0;
}
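/*
 * No context number is allocated by init_new_context() above; the
 * allocation is deferred to the first switch_mmu_context() on the
 * new mm, so a freshly forked task doesn't consume one of the few
 * contexts until it actually runs.
 */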
/*
 * We're finished using the context for an address space.
 */
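/*
 * This runs at normal mm teardown and also from steal_context();
 * either way the context number goes back into the free pool.
 */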
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.id != NO_CONTEXT) {
		clear_bit(mm->context.id, context_map);
		mm->context.id = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}
/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
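/*
 * Worked example: on 8xx (FIRST_CONTEXT = 0) this leaves context_map[0]
 * zero and all 16 contexts free; on 4xx and e200/e500 (FIRST_CONTEXT = 1)
 * it sets bit 0, permanently reserving context 0 for the kernel's shared
 * TID-zero mappings and leaving 255 contexts for user address spaces.
 */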