ARM: 7768/1: prevent risks of out-of-bound access in ASID allocator
/*
 * linux/arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |       process ID        |   ASID    |
 * +-------------------------+-----------+
 * |             context ID              |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big-endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
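
/*
 * A worked example of the packing above (a sketch, assuming ASID_BITS
 * is 8, as on current 32-bit ARM): mm->context.id carries the rollover
 * generation in bits [63:8] and the hardware ASID in bits [7:0], so
 * ASID_FIRST_VERSION == 0x100 and a context.id of 0x305 means
 * generation 3, ASID 5:
 *
 *	u64 id = atomic64_read(&mm->context.id);
 *	u64 generation = id & ASID_MASK;	-> 0x300
 *	u64 hw_asid    = id & ~ASID_MASK;	-> 0x005
 */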

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

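/*
 * Per-CPU rollover state: active_asids holds the context.id currently
 * installed on each CPU (zeroed from other cores at rollover),
 * reserved_asids remembers the ASID each CPU was running when the last
 * rollover happened, and tlb_flush_pending marks CPUs that must flush
 * their TLB before running tasks from the new generation.
 */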
DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

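/*
 * cpu_set_reserved_ttbr0() parks TTBR0 on page tables containing only
 * global mappings, so the window between changing the ASID and
 * installing the new page tables cannot allocate TLB entries under the
 * wrong ASID. The LPAE variant writes the full 64-bit TTBR0 with a zero
 * high word (which also zeroes the TTBR-resident ASID field); the
 * classic variant simply reuses the kernel tables already in TTBR1.
 */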
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1	@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0	@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

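/*
 * With CONFIG_PID_IN_CONTEXTIDR, the thread notifier below mirrors the
 * task PID into the upper bits of CONTEXTIDR on every context switch
 * while preserving the live ASID in the low bits; in effect (a sketch,
 * assuming ASID_BITS == 8):
 *
 *	contextidr = (task_pid_nr(thread->task) << ASID_BITS) |
 *		     (contextidr & ~ASID_MASK);
 *
 * so external debuggers and trace logic can attribute activity to a
 * process, as described in the layout comment above.
 */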
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

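/*
 * flush_context() runs at rollover time, with cpu_asid_lock held by the
 * caller: it rebuilds asid_map from the ASIDs still live on the other
 * CPUs and queues a TLB invalidate on every CPU before ASIDs from the
 * new generation are handed out.
 */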
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

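/*
 * new_context() returns a context.id stamped with the current
 * generation. It is called with cpu_asid_lock held and may itself bump
 * the generation and trigger a rollover via flush_context() when the
 * ASID space is exhausted.
 */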
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

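/*
 * A context.id is current when its generation bits match
 * asid_generation, i.e. (asid ^ generation) >> ASID_BITS == 0. A worked
 * example (assuming ASID_BITS == 8): id 0x205 checked against
 * generation 0x300 gives (0x205 ^ 0x300) >> 8 == 1, so that mm must be
 * handed a fresh ASID before it may run.
 */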
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
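
/*
 * Note on the fastpath above: atomic64_xchg() both samples and
 * republishes this CPU's active ASID, so a concurrent rollover on
 * another core either observes our ASID (and preserves it in
 * reserved_asids) or has already zeroed the slot; in the latter case
 * the xchg returns 0 and we fall back to taking cpu_asid_lock and
 * revalidating the generation.
 */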