/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * This is a per-cpu array.  A processor only modifies and reads its own
 * entry, so no locking is needed even while another processor is
 * updating its entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}
EXPORT_SYMBOL(kernel_thread);
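
/*
 * Illustrative sketch only (not built): how a caller might use
 * kernel_thread() above.  The helper and init function below are
 * hypothetical, not part of this file.
 */
#if 0
static int example_thread(void *arg)	/* hypothetical helper */
{
	/* runs in the kernel with CLONE_VM; do_exit() is called on return */
	return 0;
}

static int __init example_init(void)	/* hypothetical caller */
{
	int pid = kernel_thread(example_thread, NULL, 0);

	return (pid < 0) ? pid : 0;
}
#endif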

static inline void set_current(struct task_struct *task)
{
	/* Record the host pid and the new task in this CPU's slot */
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	} else
		do_exit(0);
}
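
/*
 * For reference, a sketch of that magic, assuming the usual UML layout
 * (the real implementation lives in arch/um/os-Linux/process.c):
 * new_thread() plants the handler's address as the saved IP of the
 * jmp_buf, so the first switch_threads() to this buffer "returns"
 * straight into the handler.
 */
#if 0
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
			  sizeof(void *);
}
#endif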

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to exploit this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		/*
		 * Userspace fork: the child starts from a copy of the
		 * parent's registers, with 0 as its syscall return value.
		 */
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		/* Kernel thread: start from a clean register state */
		get_safe_registers(p->thread.regs.regs.gp,
				   p->thread.regs.regs.fp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	/* Disable kmalloc while the callback runs on the initial stack */
	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_idle_enter();
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_idle_exit();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}
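
/*
 * Worked example for user_context() below, assuming 4 KiB pages and
 * CONFIG_KERNEL_STACK_ORDER of 1 (an 8 KiB kernel stack): on 32 bits,
 * PAGE_MASK << 1 is 0xffffe000, so the mask rounds sp down to the base
 * of an 8 KiB stack.  thread_info sits at the base of the current
 * kernel stack, so any other base means sp points into userspace.
 */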

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	/* Run the exitcalls in reverse order of registration */
	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
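
/*
 * Illustrative sketch only (not built): exitcalls are registered with
 * the __uml_exitcall() macro from the UML init headers, which places a
 * function pointer in the section bounded by __uml_exitcall_begin and
 * __uml_exitcall_end, e.g.:
 */
#if 0
static void example_cleanup(void)	/* hypothetical exitcall */
{
	/* release host-side resources here */
}
__uml_exitcall(example_cleanup);
#endif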

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	/* Refuse levels the host ptrace doesn't support */
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}
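
/*
 * The sysemu level is exposed via /proc/sysemu below: writing a digit
 * selects the level (for example "echo 1 > /proc/sysemu" from inside
 * the UML instance), and reading it back reports the level currently
 * in use.
 */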

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

/*
 * Returns 0 if the task is not singlestepping, 1 if it is singlestepping
 * through a system call, and 2 otherwise.
 */
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
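
/*
 * Worked example for arch_align_stack() above: with randomization
 * enabled and get_random_int() returning 5000, an incoming sp of
 * 0xbfffe000 becomes 0xbfffe000 - 5000 = 0xbfffcc78, and the final
 * mask then aligns it down to 16 bytes, giving 0xbfffcc70.
 */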

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}