/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |       process ID        |   ASID    |
 * +-------------------------+-----------+
 * |             context ID              |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
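/*
 * mm->context.id is a 64-bit value: the low ASID_BITS hold the hardware
 * ASID and the upper bits hold an allocation "generation". As a worked
 * example, assuming ASID_BITS == 8: there are 256 user ASIDs per
 * generation, asid_generation advances in steps of 0x100, and a context
 * ID of 0x234 denotes hardware ASID 0x34 in generation 0x200.
 */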
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

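/*
 * active_asids holds the context ID currently installed on each CPU; it
 * is claimed with an atomic xchg on the fast path and zeroed by a
 * rollover. reserved_asids preserves the last ASID seen on a CPU across
 * a rollover, and tlb_flush_pending marks the CPUs that must invalidate
 * their TLB before running with a new-generation ASID.
 */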
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

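/*
 * Cortex-A15 erratum 798181 workaround: build the mask of other online
 * CPUs whose active (or, across a rollover, reserved) ASID matches this
 * mm's context ID, i.e. the CPUs that need an IPI when its TLB entries
 * are invalidated.
 */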
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

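/*
 * Point TTBR0 at a translation table containing only global mappings so
 * that speculative walks issued while we are between ASIDs cannot fetch
 * entries through the old mm's tables.
 */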
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	cpu_set_ttbr(0, __pa(swapper_pg_dir));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

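/*
 * When CONFIG_PID_IN_CONTEXTIDR is set, a thread notifier mirrors the
 * switched-in task's PID into the PROCID field of CONTEXTIDR (bits
 * 31:ASID_BITS), preserving the live ASID in the low bits, so that
 * debuggers and trace hardware can attribute activity to a process.
 */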
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

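/*
 * flush_context() runs with cpu_asid_lock held once the ASID space is
 * exhausted: it rebuilds asid_map from the ASIDs still live on the
 * other CPUs (saving each into reserved_asids) and marks this CPU, or
 * all CPUs when TLB operations are not broadcast in hardware, as
 * needing a TLB flush before running with a new-generation ASID.
 */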
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

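/*
 * new_context() also runs under cpu_asid_lock and returns a context ID
 * in the current generation: either the mm's previous ASID revalidated
 * (if a rollover reserved it), or the first free bit in asid_map,
 * rolling the generation over first when the map is full.
 */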
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

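/*
 * check_and_switch_context() is the switch_mm() entry point for the
 * ASID allocator: revalidate (or reallocate) the mm's context ID,
 * perform any local TLB invalidation deferred by a rollover, then
 * install the new translation table.
 */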
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

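	/*
	 * Fast path: the mm's ASID is from the current generation and we
	 * can install it in active_asids atomically; a zero returned by
	 * the xchg means a rollover has occurred on this CPU, in which
	 * case we fall through to the locked slow path instead.
	 */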
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}