Commit | Line | Data |
---|---|---|
995473ae | 1 | /* |
ba180fd4 | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
1da177e4 LT | 3 | * Copyright 2003 PathScale, Inc. |
1da177e4 LT | 4 | * Licensed under the GPL |
1da177e4 LT | 5 | */ |
1da177e4 LT | 6 | |
c5d4bb17 JD | 7 | #include <linux/stddef.h> |
c5d4bb17 JD | 8 | #include <linux/err.h> |
c5d4bb17 JD | 9 | #include <linux/hardirq.h> |
c5d4bb17 | 10 | #include <linux/mm.h> |
6613c5e8 | 11 | #include <linux/module.h> |
c5d4bb17 JD | 12 | #include <linux/personality.h> |
c5d4bb17 JD | 13 | #include <linux/proc_fs.h> |
c5d4bb17 JD | 14 | #include <linux/ptrace.h> |
c5d4bb17 JD | 15 | #include <linux/random.h> |
5a0e3ad6 | 16 | #include <linux/slab.h> |
c5d4bb17 | 17 | #include <linux/sched.h> |
6613c5e8 | 18 | #include <linux/seq_file.h> |
c5d4bb17 JD | 19 | #include <linux/tick.h> |
c5d4bb17 JD | 20 | #include <linux/threads.h> |
c5d4bb17 JD | 21 | #include <asm/current.h> |
c5d4bb17 JD | 22 | #include <asm/pgtable.h> |
445c5786 | 23 | #include <asm/mmu_context.h> |
c5d4bb17 | 24 | #include <asm/uaccess.h> |
4ff83ce1 | 25 | #include "as-layout.h" |
ba180fd4 | 26 | #include "kern_util.h" |
1da177e4 | 27 | #include "os.h" |
77bf4400 | 28 | #include "skas.h" |
1da177e4 | 29 | |
ba180fd4 JD | 30 | /* |
ba180fd4 JD | 31 | * This is a per-cpu array. A processor only modifies its entry and it only |
1da177e4 LT | 32 | * cares about its entry, so it's OK if another processor is modifying its |
1da177e4 LT | 33 | * entry. |
1da177e4 LT | 34 | */ |
1da177e4 LT | 35 | struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } }; |
1da177e4 LT | 36 | |
2dc5802a | 37 | static inline int external_pid(void) |
1da177e4 | 38 | { |
77bf4400 | 39 | /* FIXME: Need to look up userspace_pid by cpu */ |
ba180fd4 | 40 | return userspace_pid[0]; |
1da177e4 LT | 41 | } |
1da177e4 LT | 42 | |
1da177e4 LT | 43 | int pid_to_processor_id(int pid) |
1da177e4 LT | 44 | { |
1da177e4 LT | 45 | int i; |
1da177e4 LT | 46 | |
c5d4bb17 | 47 | for (i = 0; i < ncpus; i++) { |
ba180fd4 | 48 | if (cpu_tasks[i].pid == pid) |
6e21aec3 | 49 | return i; |
1da177e4 | 50 | } |
6e21aec3 | 51 | return -1; |
1da177e4 LT | 52 | } |
1da177e4 LT | 53 | |
1da177e4 LT | 54 | void free_stack(unsigned long stack, int order) |
1da177e4 LT | 55 | { |
1da177e4 LT | 56 | free_pages(stack, order); |
1da177e4 LT | 57 | } |
1da177e4 LT | 58 | |
1da177e4 LT | 59 | unsigned long alloc_stack(int order, int atomic) |
1da177e4 LT | 60 | { |
1da177e4 LT | 61 | unsigned long page; |
53f9fc93 | 62 | gfp_t flags = GFP_KERNEL; |
1da177e4 | 63 | |
46db4a42 PBG | 64 | if (atomic) |
46db4a42 PBG | 65 | flags = GFP_ATOMIC; |
1da177e4 | 66 | page = __get_free_pages(flags, order); |
5c8aacea | 67 | |
6e21aec3 | 68 | return page; |
1da177e4 LT | 69 | } |
1da177e4 LT | 70 | |
1da177e4 LT | 71 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) |
1da177e4 LT | 72 | { |
1da177e4 LT | 73 | int pid; |
1da177e4 LT | 74 | |
1da177e4 LT | 75 | current->thread.request.u.thread.proc = fn; |
1da177e4 LT | 76 | current->thread.request.u.thread.arg = arg; |
e0877f07 JD | 77 | pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, |
e0877f07 JD | 78 | &current->thread.regs, 0, NULL, NULL); |
6e21aec3 | 79 | return pid; |
1da177e4 | 80 | } |
73395a00 | 81 | EXPORT_SYMBOL(kernel_thread); |
1da177e4 | 82 | |
6e21aec3 | 83 | static inline void set_current(struct task_struct *task) |
1da177e4 | 84 | { |
ca9bc0bb | 85 | cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) |
2dc5802a | 86 | { external_pid(), task }); |
1da177e4 LT | 87 | } |
1da177e4 LT | 88 | |
291248fd | 89 | extern void arch_switch_to(struct task_struct *to); |
77bf4400 | 90 | |
1da177e4 LT | 91 | void *_switch_to(void *prev, void *next, void *last) |
1da177e4 LT | 92 | { |
995473ae | 93 | struct task_struct *from = prev; |
291248fd | 94 | struct task_struct *to = next; |
f6e34c6a | 95 | |
995473ae JD | 96 | to->thread.prev_sched = from; |
995473ae JD | 97 | set_current(to); |
f6e34c6a | 98 | |
3eddddcf | 99 | do { |
6aa802ce | 100 | current->thread.saved_task = NULL; |
77bf4400 | 101 | |
c5d4bb17 JD | 102 | switch_threads(&from->thread.switch_buf, |
c5d4bb17 JD | 103 | &to->thread.switch_buf); |
77bf4400 | 104 | |
291248fd | 105 | arch_switch_to(current); |
77bf4400 | 106 | |
ba180fd4 | 107 | if (current->thread.saved_task) |
3eddddcf | 108 | show_regs(&(current->thread.regs)); |
c5d4bb17 JD | 109 | to = current->thread.saved_task; |
c5d4bb17 JD | 110 | from = current; |
291248fd | 111 | } while (current->thread.saved_task); |
f6e34c6a | 112 | |
6e21aec3 | 113 | return current->thread.prev_sched; |
f6e34c6a | 114 | |
1da177e4 LT | 115 | } |
1da177e4 LT | 116 | |
1da177e4 LT | 117 | void interrupt_end(void) |
1da177e4 LT | 118 | { |
ba180fd4 | 119 | if (need_resched()) |
6e21aec3 | 120 | schedule(); |
ba180fd4 | 121 | if (test_tsk_thread_flag(current, TIF_SIGPENDING)) |
6e21aec3 | 122 | do_signal(); |
1da177e4 LT | 123 | } |
1da177e4 LT | 124 | |
1da177e4 LT | 125 | void exit_thread(void) |
1da177e4 LT | 126 | { |
1da177e4 | 127 | } |
995473ae | 128 | |
1da177e4 LT | 129 | void *get_current(void) |
1da177e4 LT | 130 | { |
6e21aec3 | 131 | return current; |
1da177e4 LT | 132 | } |
1da177e4 LT | 133 | |
ba180fd4 JD | 134 | /* |
ba180fd4 JD | 135 | * This is called magically, by its address being stuffed in a jmp_buf |
77bf4400 JD | 136 | * and being longjmp-d to. |
77bf4400 JD | 137 | */ |
77bf4400 JD | 138 | void new_thread_handler(void) |
77bf4400 JD | 139 | { |
77bf4400 JD | 140 | int (*fn)(void *), n; |
77bf4400 JD | 141 | void *arg; |
77bf4400 JD | 142 | |
ba180fd4 | 143 | if (current->thread.prev_sched != NULL) |
77bf4400 JD | 144 | schedule_tail(current->thread.prev_sched); |
77bf4400 JD | 145 | current->thread.prev_sched = NULL; |
77bf4400 JD | 146 | |
77bf4400 JD | 147 | fn = current->thread.request.u.thread.proc; |
77bf4400 JD | 148 | arg = current->thread.request.u.thread.arg; |
77bf4400 JD | 149 | |
ba180fd4 JD | 150 | /* |
ba180fd4 JD | 151 | * The return value is 1 if the kernel thread execs a process, |
77bf4400 JD | 152 | * 0 if it just exits |
77bf4400 JD | 153 | */ |
77bf4400 JD | 154 | n = run_kernel_thread(fn, arg, &current->thread.exec_buf); |
ba180fd4 | 155 | if (n == 1) { |
77bf4400 JD | 156 | /* Handle any immediate reschedules or signals */ |
77bf4400 JD | 157 | interrupt_end(); |
77bf4400 JD | 158 | userspace(&current->thread.regs.regs); |
77bf4400 JD | 159 | } |
77bf4400 JD | 160 | else do_exit(0); |
77bf4400 JD | 161 | } |
77bf4400 JD | 162 | |
77bf4400 JD | 163 | /* Called magically, see new_thread_handler above */ |
77bf4400 JD | 164 | void fork_handler(void) |
77bf4400 JD | 165 | { |
77bf4400 JD | 166 | force_flush_all(); |
77bf4400 JD | 167 | |
77bf4400 JD | 168 | schedule_tail(current->thread.prev_sched); |
77bf4400 JD | 169 | |
ba180fd4 JD | 170 | /* |
ba180fd4 JD | 171 | * XXX: if interrupt_end() calls schedule, this call to |
77bf4400 | 172 | * arch_switch_to isn't needed. We could want to apply this to |
ba180fd4 JD | 173 | * improve performance. -bb |
ba180fd4 JD | 174 | */ |
291248fd | 175 | arch_switch_to(current); |
77bf4400 JD | 176 | |
77bf4400 JD | 177 | current->thread.prev_sched = NULL; |
77bf4400 JD | 178 | |
77bf4400 JD | 179 | /* Handle any immediate reschedules or signals */ |
77bf4400 JD | 180 | interrupt_end(); |
77bf4400 JD | 181 | |
77bf4400 JD | 182 | userspace(&current->thread.regs.regs); |
77bf4400 JD | 183 | } |
77bf4400 JD | 184 | |
6f2c55b8 | 185 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
995473ae | 186 | unsigned long stack_top, struct task_struct * p, |
1da177e4 LT | 187 | struct pt_regs *regs) |
1da177e4 LT | 188 | { |
77bf4400 JD | 189 | void (*handler)(void); |
77bf4400 JD | 190 | int ret = 0; |
aa6758d4 | 191 | |
1da177e4 | 192 | p->thread = (struct thread_struct) INIT_THREAD; |
aa6758d4 | 193 | |
ba180fd4 | 194 | if (current->thread.forking) { |
77bf4400 JD | 195 | memcpy(&p->thread.regs.regs, &regs->regs, |
77bf4400 JD | 196 | sizeof(p->thread.regs.regs)); |
18baddda | 197 | REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0); |
ba180fd4 | 198 | if (sp != 0) |
18baddda | 199 | REGS_SP(p->thread.regs.regs.gp) = sp; |
aa6758d4 | 200 | |
77bf4400 | 201 | handler = fork_handler; |
aa6758d4 | 202 | |
77bf4400 JD | 203 | arch_copy_thread(&current->thread.arch, &p->thread.arch); |
77bf4400 JD | 204 | } |
77bf4400 JD | 205 | else { |
fbfe9c84 | 206 | get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); |
77bf4400 JD | 207 | p->thread.request.u.thread = current->thread.request.u.thread; |
77bf4400 JD | 208 | handler = new_thread_handler; |
77bf4400 JD | 209 | } |
77bf4400 JD | 210 | |
77bf4400 JD | 211 | new_thread(task_stack_page(p), &p->thread.switch_buf, handler); |
77bf4400 JD | 212 | |
77bf4400 JD | 213 | if (current->thread.forking) { |
77bf4400 JD | 214 | clear_flushed_tls(p); |
77bf4400 JD | 215 | |
77bf4400 JD | 216 | /* |
77bf4400 JD | 217 | * Set a new TLS for the child thread? |
77bf4400 JD | 218 | */ |
77bf4400 JD | 219 | if (clone_flags & CLONE_SETTLS) |
77bf4400 JD | 220 | ret = arch_copy_tls(p); |
77bf4400 JD | 221 | } |
aa6758d4 | 222 | |
aa6758d4 | 223 | return ret; |
1da177e4 LT | 224 | } |
1da177e4 LT | 225 | |
1da177e4 LT | 226 | void initial_thread_cb(void (*proc)(void *), void *arg) |
1da177e4 LT | 227 | { |
1da177e4 LT | 228 | int save_kmalloc_ok = kmalloc_ok; |
1da177e4 LT | 229 | |
1da177e4 LT | 230 | kmalloc_ok = 0; |
6aa802ce | 231 | initial_thread_cb_skas(proc, arg); |
1da177e4 LT | 232 | kmalloc_ok = save_kmalloc_ok; |
1da177e4 LT | 233 | } |
995473ae | 234 | |
1da177e4 LT | 235 | void default_idle(void) |
1da177e4 LT | 236 | { |
b160fb63 JD | 237 | unsigned long long nsecs; |
b160fb63 JD | 238 | |
c5d4bb17 | 239 | while (1) { |
1da177e4 | 240 | /* endless idle loop with no priority at all */ |
1da177e4 LT | 241 | |
1da177e4 LT | 242 | /* |
1da177e4 LT | 243 | * although we are an idle CPU, we do not want to |
1da177e4 LT | 244 | * get into the scheduler unnecessarily. |
1da177e4 LT | 245 | */ |
ba180fd4 | 246 | if (need_resched()) |
1da177e4 | 247 | schedule(); |
995473ae | 248 | |
2bbb6817 | 249 | tick_nohz_idle_enter_norcu(); |
b160fb63 JD | 250 | nsecs = disable_timer(); |
b160fb63 JD | 251 | idle_sleep(nsecs); |
2bbb6817 | 252 | tick_nohz_idle_exit_norcu(); |
1da177e4 LT | 253 | } |
1da177e4 LT | 254 | } |
1da177e4 LT | 255 | |
1da177e4 LT | 256 | void cpu_idle(void) |
1da177e4 LT | 257 | { |
a5a678c8 | 258 | cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); |
77bf4400 | 259 | default_idle(); |
1da177e4 LT | 260 | } |
1da177e4 LT | 261 | |
b6316293 PBG | 262 | int __cant_sleep(void) { |
b6316293 PBG | 263 | return in_atomic() || irqs_disabled() || in_interrupt(); |
b6316293 PBG | 264 | /* Is in_interrupt() really needed? */ |
1da177e4 LT | 265 | } |
1da177e4 LT | 266 | |
1da177e4 LT | 267 | int user_context(unsigned long sp) |
1da177e4 LT | 268 | { |
1da177e4 LT | 269 | unsigned long stack; |
1da177e4 LT | 270 | |
1da177e4 LT | 271 | stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER); |
a5a678c8 | 272 | return stack != (unsigned long) current_thread_info(); |
1da177e4 LT | 273 | } |
1da177e4 LT | 274 | |
1da177e4 LT | 275 | extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end; |
1da177e4 LT | 276 | |
1da177e4 LT | 277 | void do_uml_exitcalls(void) |
1da177e4 LT | 278 | { |
1da177e4 LT | 279 | exitcall_t *call; |
1da177e4 LT | 280 | |
1da177e4 LT | 281 | call = &__uml_exitcall_end; |
1da177e4 LT | 282 | while (--call >= &__uml_exitcall_begin) |
1da177e4 LT | 283 | (*call)(); |
1da177e4 LT | 284 | } |
1da177e4 LT | 285 | |
c0a9290e | 286 | char *uml_strdup(const char *string) |
1da177e4 | 287 | { |
dfe52244 | 288 | return kstrdup(string, GFP_KERNEL); |
1da177e4 | 289 | } |
73395a00 | 290 | EXPORT_SYMBOL(uml_strdup); |
1da177e4 | 291 | |
1da177e4 LT | 292 | int copy_to_user_proc(void __user *to, void *from, int size) |
1da177e4 LT | 293 | { |
6e21aec3 | 294 | return copy_to_user(to, from, size); |
1da177e4 LT | 295 | } |
1da177e4 LT | 296 | |
1da177e4 LT | 297 | int copy_from_user_proc(void *to, void __user *from, int size) |
1da177e4 LT | 298 | { |
6e21aec3 | 299 | return copy_from_user(to, from, size); |
1da177e4 LT | 300 | } |
1da177e4 LT | 301 | |
1da177e4 LT | 302 | int clear_user_proc(void __user *buf, int size) |
1da177e4 LT | 303 | { |
6e21aec3 | 304 | return clear_user(buf, size); |
1da177e4 LT | 305 | } |
1da177e4 LT | 306 | |
1da177e4 LT | 307 | int strlen_user_proc(char __user *str) |
1da177e4 LT | 308 | { |
6e21aec3 | 309 | return strlen_user(str); |
1da177e4 LT | 310 | } |
1da177e4 LT | 311 | |
1da177e4 LT | 312 | int smp_sigio_handler(void) |
1da177e4 LT | 313 | { |
1da177e4 LT | 314 | #ifdef CONFIG_SMP |
a5a678c8 | 315 | int cpu = current_thread_info()->cpu; |
1da177e4 | 316 | IPI_handler(cpu); |
ba180fd4 | 317 | if (cpu != 0) |
6e21aec3 | 318 | return 1; |
1da177e4 | 319 | #endif |
6e21aec3 | 320 | return 0; |
1da177e4 LT | 321 | } |
1da177e4 LT | 322 | |
1da177e4 LT | 323 | int cpu(void) |
1da177e4 LT | 324 | { |
a5a678c8 | 325 | return current_thread_info()->cpu; |
1da177e4 LT | 326 | } |
1da177e4 LT | 327 | |
1da177e4 LT | 328 | static atomic_t using_sysemu = ATOMIC_INIT(0); |
1da177e4 LT | 329 | int sysemu_supported; |
1da177e4 LT | 330 | |
1da177e4 LT | 331 | void set_using_sysemu(int value) |
1da177e4 LT | 332 | { |
1da177e4 LT | 333 | if (value > sysemu_supported) |
1da177e4 LT | 334 | return; |
1da177e4 LT | 335 | atomic_set(&using_sysemu, value); |
1da177e4 LT | 336 | } |
1da177e4 LT | 337 | |
1da177e4 LT | 338 | int get_using_sysemu(void) |
1da177e4 LT | 339 | { |
1da177e4 LT | 340 | return atomic_read(&using_sysemu); |
1da177e4 LT | 341 | } |
1da177e4 LT | 342 | |
6613c5e8 | 343 | static int sysemu_proc_show(struct seq_file *m, void *v) |
1da177e4 | 344 | { |
6613c5e8 AD | 345 | seq_printf(m, "%d\n", get_using_sysemu()); |
6613c5e8 AD | 346 | return 0; |
6613c5e8 AD | 347 | } |
1da177e4 | 348 | |
6613c5e8 AD | 349 | static int sysemu_proc_open(struct inode *inode, struct file *file) |
6613c5e8 AD | 350 | { |
6613c5e8 AD | 351 | return single_open(file, sysemu_proc_show, NULL); |
1da177e4 LT | 352 | } |
1da177e4 LT | 353 | |
6613c5e8 AD | 354 | static ssize_t sysemu_proc_write(struct file *file, const char __user *buf, |
6613c5e8 AD | 355 | size_t count, loff_t *pos) |
1da177e4 LT | 356 | { |
1da177e4 LT | 357 | char tmp[2]; |
1da177e4 LT | 358 | |
1da177e4 LT | 359 | if (copy_from_user(tmp, buf, 1)) |
1da177e4 LT | 360 | return -EFAULT; |
1da177e4 LT | 361 | |
1da177e4 LT | 362 | if (tmp[0] >= '0' && tmp[0] <= '2') |
1da177e4 LT | 363 | set_using_sysemu(tmp[0] - '0'); |
ba180fd4 JD | 364 | /* We use the first char, but pretend to write everything */ |
ba180fd4 JD | 365 | return count; |
1da177e4 LT | 366 | } |
1da177e4 LT | 367 | |
6613c5e8 AD | 368 | static const struct file_operations sysemu_proc_fops = { |
6613c5e8 AD | 369 | .owner = THIS_MODULE, |
6613c5e8 AD | 370 | .open = sysemu_proc_open, |
6613c5e8 AD | 371 | .read = seq_read, |
6613c5e8 AD | 372 | .llseek = seq_lseek, |
6613c5e8 AD | 373 | .release = single_release, |
6613c5e8 AD | 374 | .write = sysemu_proc_write, |
6613c5e8 AD | 375 | }; |
6613c5e8 AD | 376 | |
1da177e4 LT | 377 | int __init make_proc_sysemu(void) |
1da177e4 LT | 378 | { |
1da177e4 LT | 379 | struct proc_dir_entry *ent; |
1da177e4 LT | 380 | if (!sysemu_supported) |
1da177e4 LT | 381 | return 0; |
1da177e4 LT | 382 | |
6613c5e8 | 383 | ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops); |
1da177e4 LT | 384 | |
1da177e4 LT | 385 | if (ent == NULL) |
1da177e4 LT | 386 | { |
30f417c6 | 387 | printk(KERN_WARNING "Failed to register /proc/sysemu\n"); |
6e21aec3 | 388 | return 0; |
1da177e4 LT | 389 | } |
1da177e4 LT | 390 | |
1da177e4 LT | 391 | return 0; |
1da177e4 LT | 392 | } |
1da177e4 LT | 393 | |
1da177e4 LT | 394 | late_initcall(make_proc_sysemu); |
1da177e4 LT | 395 | |
1da177e4 LT | 396 | int singlestepping(void * t) |
1da177e4 LT | 397 | { |
1da177e4 LT | 398 | struct task_struct *task = t ? t : current; |
1da177e4 LT | 399 | |
c5d4bb17 | 400 | if (!(task->ptrace & PT_DTRACE)) |
ba180fd4 | 401 | return 0; |
1da177e4 LT | 402 | |
1da177e4 LT | 403 | if (task->thread.singlestep_syscall) |
ba180fd4 | 404 | return 1; |
1da177e4 LT | 405 | |
1da177e4 LT | 406 | return 2; |
1da177e4 LT | 407 | } |
1da177e4 LT | 408 | |
b8bd0220 BS | 409 | /* |
b8bd0220 BS | 410 | * Only x86 and x86_64 have an arch_align_stack(). |
b8bd0220 BS | 411 | * All other arches have "#define arch_align_stack(x) (x)" |
b8bd0220 BS | 412 | * in their asm/system.h |
b8bd0220 BS | 413 | * As this is included in UML from asm-um/system-generic.h, |
b8bd0220 BS | 414 | * we can use it to behave as the subarch does. |
b8bd0220 BS | 415 | */ |
b8bd0220 BS | 416 | #ifndef arch_align_stack |
1da177e4 LT | 417 | unsigned long arch_align_stack(unsigned long sp) |
1da177e4 LT | 418 | { |
8f80e946 | 419 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
1da177e4 LT | 420 | sp -= get_random_int() % 8192; |
1da177e4 LT | 421 | return sp & ~0xf; |
1da177e4 LT | 422 | } |
b8bd0220 | 423 | #endif |
c1127465 JD | 424 | |
c1127465 JD | 425 | unsigned long get_wchan(struct task_struct *p) |
c1127465 JD | 426 | { |
c1127465 JD | 427 | unsigned long stack_page, sp, ip; |
c1127465 JD | 428 | bool seen_sched = 0; |
c1127465 JD | 429 | |
c1127465 JD | 430 | if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING)) |
c1127465 JD | 431 | return 0; |
c1127465 JD | 432 | |
c1127465 JD | 433 | stack_page = (unsigned long) task_stack_page(p); |
c1127465 JD | 434 | /* Bail if the process has no kernel stack for some reason */ |
c1127465 JD | 435 | if (stack_page == 0) |
c1127465 JD | 436 | return 0; |
c1127465 JD | 437 | |
c1127465 JD | 438 | sp = p->thread.switch_buf->JB_SP; |
c1127465 JD | 439 | /* |
c1127465 JD | 440 | * Bail if the stack pointer is below the bottom of the kernel |
c1127465 JD | 441 | * stack for some reason |
c1127465 JD | 442 | */ |
c1127465 JD | 443 | if (sp < stack_page) |
c1127465 JD | 444 | return 0; |
c1127465 JD | 445 | |
c1127465 JD | 446 | while (sp < stack_page + THREAD_SIZE) { |
c1127465 JD | 447 | ip = *((unsigned long *) sp); |
c1127465 JD | 448 | if (in_sched_functions(ip)) |
c1127465 JD | 449 | /* Ignore everything until we're above the scheduler */ |
c1127465 JD | 450 | seen_sched = 1; |
c1127465 JD | 451 | else if (kernel_text_address(ip) && seen_sched) |
c1127465 JD | 452 | return ip; |
c1127465 JD | 453 | |
c1127465 JD | 454 | sp += sizeof(unsigned long); |
c1127465 JD | 455 | } |
c1127465 JD | 456 | |
c1127465 JD | 457 | return 0; |
c1127465 JD | 458 | } |
8192ab42 JD | 459 | |
8192ab42 JD | 460 | int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu) |
8192ab42 JD | 461 | { |
8192ab42 JD | 462 | int cpu = current_thread_info()->cpu; |
8192ab42 JD | 463 | |
8192ab42 JD | 464 | return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu); |
8192ab42 JD | 465 | } |
| 466 | |
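
The cpu_tasks[] initializer at line 35 relies on GCC's range-designator extension (`[first ... last] = value`), which fills every slot of the array with the same value. Below is a minimal, hedged stand-alone sketch of that idiom; the `cpu_task_demo` struct and `NCPUS` constant are made up for illustration and are not the kernel's `struct cpu_task`. It compiles with gcc, since the range form is not ISO C.

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct cpu_task. */
struct cpu_task_demo {
	int pid;
	void *task;
};

#define NCPUS 4

/* GCC range designator: every element from 0 to NCPUS - 1 gets { -1, NULL }. */
static struct cpu_task_demo tasks[NCPUS] = {
	[0 ... NCPUS - 1] = { -1, NULL }
};

int main(void)
{
	for (int i = 0; i < NCPUS; i++)
		printf("cpu %d: pid=%d task=%p\n", i, tasks[i].pid, tasks[i].task);
	return 0;
}
```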
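The comment above new_thread_handler() (lines 134-136) describes the hand-off mechanism the switching code is built on: a resume point is recorded in a jmp_buf and control later reappears there via longjmp. The sketch below is a hedged user-space illustration of that primitive only; it is not UML's actual switch_threads()/new_thread() implementation, which additionally gives each kernel thread its own stack and jmp_buf wrappers.

```c
#include <setjmp.h>
#include <stdio.h>

static jmp_buf resume_point;

static void deep_work(void)
{
	printf("deep_work: jumping back to the saved context\n");
	longjmp(resume_point, 1);	/* control reappears at setjmp() in main() */
}

int main(void)
{
	if (setjmp(resume_point) == 0) {
		/* First pass: the context is saved, then we descend into work. */
		printf("main: context saved, calling deep_work()\n");
		deep_work();
		/* Never reached: longjmp() skips the normal return path. */
	}
	printf("main: resumed via longjmp\n");
	return 0;
}
```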
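The /proc/sysemu handlers (lines 343-375) report the current sysemu level through seq_file on read and accept a single character '0'..'2' on write. The following is a small, hypothetical user-space exerciser for that interface; it assumes it runs as root inside a UML guest where sysemu_supported is set, and keeps error handling minimal.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int fd = open("/proc/sysemu", O_RDWR);	/* file is created with mode 0600 */

	if (fd < 0) {
		perror("open /proc/sysemu");
		return 1;
	}

	ssize_t n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "2\n" */
	if (n > 0) {
		buf[n] = '\0';
		printf("current sysemu level: %s", buf);
	}

	if (write(fd, "1", 1) != 1)	/* only the first character is examined */
		perror("write /proc/sysemu");

	close(fd);
	return 0;
}
```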