/*
 * Process creation support for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
/*
 * Program thread launch.  Often defined as a macro in processor.h,
 * but we're shooting for a small footprint and it's not an inner-loop
 * performance-critical operation.
 *
 * The Hexagon ABI specifies that R28 is zero'ed before program launch,
 * so that gets automatically done here.  If we ever stop doing that here,
 * we'll probably want to define the ELF_PLAT_INIT macro.
 */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	/* Set to run with user-mode data segmentation */
	set_fs(USER_DS);
	/*
	 * We want to zero all data-containing registers.  Is this overkill?
	 * Note the memset must come before the pt_set_*() calls below,
	 * since they write into the frame it clears.
	 */
	memset(regs, 0, sizeof(*regs));
	/* We might want to also zero all Processor registers here */
	pt_set_usermode(regs);		/* frame returns to user mode */
	pt_set_elr(regs, pc);		/* program entry point */
	pt_set_rte_sp(regs, sp);	/* initial user stack pointer */
}
/*
 * Spin, or better still, do a hardware or VM wait instruction
 * If hardware or VM offer wait termination even though interrupts
 * are disabled.
 */
static void default_idle(void)
{
	/* VM wait: sleeps until an event/interrupt terminates the wait */
	__vmwait();
}

/*
 * Hook for the idle-sleep implementation; defaults to the VM wait above.
 * Left non-static so it can presumably be overridden elsewhere —
 * NOTE(review): no override is visible in this file, confirm callers.
 */
void (*idle_sleep)(void) = default_idle;
60 | ||
/*
 * Architecture idle loop.  Never returns; alternates between sleeping
 * (via the idle_sleep hook) and scheduling when work becomes runnable.
 * The exact IRQ enable/disable ordering below is deliberate — the sleep
 * is entered with interrupts disabled, and a wakeup interrupt is only
 * serviced by the brief enable/disable window afterwards.
 */
void cpu_idle(void)
{
	while (1) {
		/* stop the periodic tick while idle (NO_HZ) */
		tick_nohz_idle_enter();
		local_irq_disable();
		while (!need_resched()) {
			idle_sleep();
			/* interrupts wake us up, but aren't serviced */
			local_irq_enable();	/* service interrupt */
			local_irq_disable();
		}
		local_irq_enable();
		/* restart the tick before scheduling */
		tick_nohz_idle_exit();
		schedule();
	}
}
77 | ||
/*
 * Return saved PC of a blocked thread
 *
 * NOTE(review): this is a stub — it always returns 0 and never
 * recovers the blocked thread's saved PC.  Confirm that callers
 * (e.g. /proc reporting) tolerate a zero result on this arch.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return 0;
}
85 | ||
/*
 * Copy architecture-specific thread state
 *
 * Carves the child's initial register frame (pt_regs) out of the top
 * of its kernel stack, and a switch-stack frame immediately below it,
 * then fills them in so that the first switch to the child resumes in
 * ret_from_fork.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct hexagon_switch_stack *ss;
	struct pt_regs *childregs;
	asmlinkage void ret_from_fork(void);

	/* pt_regs frame sits at the very top of the child's kernel stack */
	childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
					sizeof(*childregs));

	ti->regs = childregs;

	/*
	 * Establish kernel stack pointer and initial PC for new thread
	 * Note that unlike the usual situation, we do not copy the
	 * parent's callee-saved here; those are in pt_regs and whatever
	 * we leave here will be overridden on return to userland.
	 */
	ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
						sizeof(*ss));
	/* first context switch into the child lands in ret_from_fork */
	ss->lr = (unsigned long)ret_from_fork;
	p->thread.switch_sp = ss;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: no userspace register state to inherit */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* r24 <- fn, r25 <- arg */
		ss->r2524 = usp | ((u64)arg << 32);
		pt_set_kmode(childregs);
		return 0;
	}
	/* user thread: start from a copy of the parent's trap frame */
	memcpy(childregs, current_pt_regs(), sizeof(*childregs));
	ss->r2524 = 0;

	/* caller may supply a new user stack (e.g. for clone) */
	if (usp)
		pt_set_rte_sp(childregs, usp);

	/* Child sees zero return value */
	childregs->r00 = 0;

	/*
	 * The clone syscall has the C signature:
	 * int [r0] clone(int flags [r0],
	 *	void *child_frame [r1],
	 *	void *parent_tid [r2],
	 *	void *child_tid [r3],
	 *	void *thread_control_block [r4]);
	 * ugp is used to provide TLS support.
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->ugp = childregs->r04;

	/*
	 * Parent sees new pid -- not necessary, not even possible at
	 * this point in the fork process
	 * Might also want to set things like ti->addr_limit
	 */

	return 0;
}
148 | ||
/*
 * Release any architecture-specific resources locked by thread
 * (nothing to do on Hexagon — intentionally empty).
 */
void release_thread(struct task_struct *dead_task)
{
}
155 | ||
/*
 * Free any architecture-specific thread data structures, etc.
 * (nothing to do on Hexagon — intentionally empty).
 */
void exit_thread(void)
{
}
162 | ||
/*
 * Some archs flush debug and FPU info here
 * (nothing to flush on Hexagon — intentionally empty).
 */
void flush_thread(void)
{
}
169 | ||
170 | /* | |
171 | * The "wait channel" terminology is archaic, but what we want | |
172 | * is an identification of the point at which the scheduler | |
173 | * was invoked by a blocked thread. | |
174 | */ | |
175 | unsigned long get_wchan(struct task_struct *p) | |
176 | { | |
177 | unsigned long fp, pc; | |
178 | unsigned long stack_page; | |
179 | int count = 0; | |
180 | if (!p || p == current || p->state == TASK_RUNNING) | |
181 | return 0; | |
182 | ||
183 | stack_page = (unsigned long)task_stack_page(p); | |
184 | fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp; | |
185 | do { | |
186 | if (fp < (stack_page + sizeof(struct thread_info)) || | |
187 | fp >= (THREAD_SIZE - 8 + stack_page)) | |
188 | return 0; | |
189 | pc = ((unsigned long *)fp)[1]; | |
190 | if (!in_sched_functions(pc)) | |
191 | return pc; | |
192 | fp = *(unsigned long *) fp; | |
193 | } while (count++ < 16); | |
194 | ||
195 | return 0; | |
196 | } | |
197 | ||
/*
 * Required placeholder.
 * Hexagon has no FPU state to dump for core files; always reports
 * "no FPU registers present" by returning 0.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0;
}