ARM: 8559/1: errata: Workaround erratum A12 821420
arch/arm/kernel/process.c
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>

#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
#include <asm/vdso.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

static const char *processor_modes[] __maybe_unused = {
	"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
	"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
	"USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
	"UK8_32" , "UK9_32" , "HYP_32" , "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] __maybe_unused = {
	"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

/*
 * Called from the core idle loop.
 */

void arch_cpu_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	local_irq_enable();
}

void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}

void arch_cpu_idle_enter(void)
{
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
}

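/*
 * Dump the saved register state, CPSR condition flags and, where
 * present, the MMU control/translation registers for the given
 * pt_regs.
 */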
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];
#ifndef CONFIG_CPU_V7M
	unsigned int domain, fs;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Get the domain register for the parent context. In user
	 * mode, we don't save the DACR, so lets use what it should
	 * be. For other modes, we place it after the pt_regs struct.
	 */
	if (user_mode(regs)) {
		domain = DACR_UACCESS_ENABLE;
		fs = get_fs();
	} else {
		domain = to_svc_pt_regs(regs)->dacr;
		fs = to_svc_pt_regs(regs)->addr_limit;
	}
#else
	domain = get_domain();
	fs = get_fs();
#endif
#endif

	show_regs_print_info(KERN_DEFAULT);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
	{
		const char *segment;

		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
		else if (fs == get_ds())
			segment = "kernel";
		else
			segment = "user";

		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
			buf, interrupts_enabled(regs) ? "n" : "ff",
			fast_interrupts_enabled(regs) ? "n" : "ff",
			processor_modes[processor_mode(regs)],
			isa_modes[isa_mode(regs)], segment);
	}
#else
	printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif

#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase;
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    : "=r" (transbase));
			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
				transbase, domain);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif
}

void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_stack();
}

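/*
 * Notifier chain used by the VFP and other coprocessor/debug code to
 * hook thread lifecycle events (flush, exit, copy and context switch).
 */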
ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
}

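/*
 * Called on exec: reset the thread state for the new image by clearing
 * the coprocessor usage flags, debug and FP state, and the TLS values.
 */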
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	flush_tls();

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

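/*
 * Set up the saved CPU context and pt_regs for a newly created task.
 * User threads get a copy of the parent's registers (with r0 = 0 and,
 * optionally, a new stack pointer); kernel threads get a clean frame
 * with the thread function and its argument stashed in r5/r4 for
 * ret_from_fork.
 */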
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

#ifdef CONFIG_CPU_USE_DOMAINS
	/*
	 * Copy the initial value of the domain access control register
	 * from the current thread: thread->addr_limit will have been
	 * copied from the current thread via setup_thread_stack() in
	 * kernel/fork.c
	 */
	thread->cpu_domain = get_domain();
#endif

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = childregs->ARM_r3;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

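/*
 * Determine where a sleeping task is blocked (its "wait channel"):
 * walk the saved stack frames and return the first PC outside the
 * scheduler, giving up after 16 frames or once the frame leaves the
 * task's stack.
 */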
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count ++ < 16);
	return 0;
}

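/*
 * Randomize the heap start: pick a random page-aligned address within
 * 32MB (0x02000000) above the current brk, falling back to the
 * unrandomized brk if no random address is produced.
 */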
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= 0xffff0000,
	.vm_end		= 0xffff0000 + PAGE_SIZE,
	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
};

static int __init gate_vma_init(void)
{
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	return 0;
}
arch_initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif

const char *arch_vma_name(struct vm_area_struct *vma)
{
	return is_gate_vma(vma) ? "[vectors]" : NULL;
}

/* If possible, provide a placement hint at a random offset from the
 * stack for the sigpage and vdso pages.
 */
static unsigned long sigpage_addr(const struct mm_struct *mm,
				  unsigned int npages)
{
	unsigned long offset;
	unsigned long first;
	unsigned long last;
	unsigned long addr;
	unsigned int slots;

	first = PAGE_ALIGN(mm->start_stack);

	last = TASK_SIZE - (npages << PAGE_SHIFT);

	/* No room after stack? */
	if (first > last)
		return 0;

	/* Just enough room? */
	if (first == last)
		return first;

	slots = ((last - first) >> PAGE_SHIFT) + 1;

	offset = get_random_int() % slots;

	addr = first + (offset << PAGE_SHIFT);

	return addr;
}

static struct page *signal_page;
extern struct page *get_signal_page(void);

static const struct vm_special_mapping sigpage_mapping = {
	.name = "[sigpage]",
	.pages = &signal_page,
};

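/*
 * Called at exec time to map the signal return trampoline page (and,
 * when enabled, the vdso pages immediately after it) into the new
 * process's address space.
 */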
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long npages;
	unsigned long addr;
	unsigned long hint;
	int ret = 0;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	npages = 1; /* for sigpage */
	npages += vdso_total_pages;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	hint = sigpage_addr(mm, npages);
	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&sigpage_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	mm->context.sigpage = addr;

	/* Unlike the sigpage, failure to install the vdso is unlikely
	 * to be fatal to the process, so no error check needed
	 * here.
	 */
	arm_install_vdso(mm, addr + PAGE_SIZE);

 up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif