nohz: Allow rcu extended quiescent state handling separately from tick stop
arch/blackfin/kernel/process.c

/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using a L1 stack area. The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void) __attribute__((l1_text));
void cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler. We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	hard_local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();

	hard_local_irq_enable();
}

/*
 * The idle thread. We try to conserve power, while trying to keep
 * overall latency low. The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
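		/*
		 * The _norcu variants below are assumed to combine stopping
		 * the periodic tick with entering the RCU extended quiescent
		 * state (cf. the commit title); that is only safe because
		 * nothing in this idle loop uses RCU read-side critical
		 * sections between the enter and exit calls.
		 */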
		tick_nohz_idle_enter_norcu();
		while (!need_resched())
			idle();
		tick_nohz_idle_exit_norcu();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args". Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"_kernel_thread_helper:\n\t"
	"\tsp += -12;\n\t"
	"\tr0 = r1;\n\t"
	"\tcall (p1);\n\t"
	"\tcall _do_exit;\n"
	".previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.r1 = (unsigned long)arg;
	regs.p1 = (unsigned long)fn;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.orig_p0 = -1;
	/* Set bit 2 to tell ret_from_fork we should be returning to kernel
	   mode. */
	regs.ipend = 0x8002;
	__asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
		       NULL);
}
EXPORT_SYMBOL(kernel_thread);
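
/*
 * Illustrative sketch (compiled out): one way a driver might use
 * kernel_thread() above. The thread function and flag choice here are purely
 * hypothetical; when the thread function returns, kernel_thread_helper
 * falls through to its call to _do_exit.
 */
#if 0
static int example_thread(void *arg)
{
	/* do some background work with arg, then terminate by returning */
	return 0;
}

static void example_spawn(void)
{
	pid_t pid = kernel_thread(example_thread, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "kernel_thread failed: %ld\n", (long)pid);
}
#endif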

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	regs->pc = new_ip;
	if (current->mm)
		regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
	task_thread_info(current)->l1_task_info.stack_start =
		(void *)current->mm->context.stack_start;
	task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
	       sizeof(*L1_SCRATCH_TASK_INFO));
#endif
	wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
		       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
	if (current->rt.nr_cpus_allowed == num_possible_cpus())
		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

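/*
 * Set up the kernel stack of a freshly forked task: childregs is the pt_regs
 * frame at the top of the child's kernel stack, copied from the parent, with
 * r0 cleared so that fork()/clone() return 0 in the child; the saved pc makes
 * the child resume execution in ret_from_fork.
 */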
int
copy_thread(unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	*childregs = *regs;
	childregs->r0 = 0;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childregs;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(const char __user *name,
			  const char __user *const __user *argv,
			  const char __user *const __user *envp)
{
	int error;
	char *filename;
	struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

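/*
 * Walk the task's saved frame pointer chain to report where a sleeping task
 * is blocked, skipping over scheduler internals; returns 0 for running tasks
 * or when no plausible caller is found within a few frames.
 */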
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	} while (count++ < 16);
	return 0;
}

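/*
 * Userspace performs its atomic operations by calling the tiny fixed-code
 * sequences at ATOMIC_XCHG32, ATOMIC_CAS32, and friends (see
 * <asm/fixed_code.h>). If an interrupt or exception lands in the middle of
 * one of those sequences, finish the pending store on the task's behalf and
 * emulate the final RTS so the sequence still appears atomic to userspace.
 */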
void finish_atomic_sections(struct pt_regs *regs)
{
	int __user *up0 = (int __user *)regs->p0;

	switch (regs->pc) {
	default:
		/* not in middle of an atomic step, so resume like normal */
		return;

	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, up0);
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
	case ATOMIC_CAS32 + 6:
			put_user(regs->r2, up0);
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, up0);
		break;
	}

	/*
	 * We've finished the atomic section, and the only thing left for
	 * userspace is to do a RTS, so we might as well handle that too
	 * since we need to update the PC anyways.
	 */
	regs->pc = regs->rets;
}

static inline
int in_mem(unsigned long addr, unsigned long size,
	   unsigned long start, unsigned long end)
{
	return addr >= start && addr + size <= end;
}
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
		     unsigned long const_addr, unsigned long const_size)
{
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
{
	return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
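/*
 * ASYNC_ENABLED(bnum, bctlnum) is taken to mean "async bank bnum is safe to
 * read": the AMBEN field of EBIU_AMGCTL must enable at least bnum + 1 banks,
 * and the bank's ARDY handshake (BxRDYEN in EBIU_AMBCTL<bctlnum>) must be
 * off, since a read from an ARDY-gated bank with nothing driving ARDY would
 * stall the core.
 */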
#define ASYNC_ENABLED(bnum, bctlnum)	\
({ \
	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
	1; \
})
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space. Make sure we validate accesses
 * that cross async banks too.
 * 0 - found, but unusable
 * 1 - found & usable
 * 2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
		if (!ASYNC_ENABLED(0, 0))
			return 0;
		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
			return 1;
		size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
		addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
	}
	if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
		if (!ASYNC_ENABLED(1, 0))
			return 0;
		if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
			return 1;
		size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
		addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
	}
	if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
		if (!ASYNC_ENABLED(2, 1))
			return 0;
		if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
			return 1;
		size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
		addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
	}
	if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
		if (!ASYNC_ENABLED(3, 1))
			return 0;
		if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
			return 1;
		return 0;
	}

	/* not within async bounds */
	return 2;
}

int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
	int cpu = raw_smp_processor_id();

	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return -EFAULT;

	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
		return BFIN_MEM_ACCESS_CORE;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
		return BFIN_MEM_ACCESS_CORE;

	if (addr >= SYSMMR_BASE)
		return BFIN_MEM_ACCESS_CORE_ONLY;

	switch (in_async(addr, size)) {
	case 0: return -EFAULT;
	case 1: return BFIN_MEM_ACCESS_CORE;
	case 2: /* fall through */;
	}

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return BFIN_MEM_ACCESS_DMA;

	return -EFAULT;
}
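
/*
 * Illustrative sketch (compiled out): one way a caller could act on the
 * access type reported above when reading another context's memory. The
 * mapping of types to helpers is approximate, and example_read(),
 * dma_memcpy() and isram_memcpy() are assumptions about surrounding code
 * rather than part of this file.
 */
#if 0
static int example_read(void *dst, unsigned long src, size_t size)
{
	switch (bfin_mem_access_type(src, size)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		memcpy(dst, (void *)src, size);		/* core can read it directly */
		return 0;
	case BFIN_MEM_ACCESS_DMA:
	case BFIN_MEM_ACCESS_IDMA:
		dma_memcpy(dst, (void *)src, size);	/* go through the DMA engine */
		return 0;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(dst, (void *)src, size);	/* L1 instruction SRAM */
		return 0;
	default:
		return -EFAULT;
	}
}
#endif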

#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	int aret;

	if (size == 0)
		return 1;
	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	if (1)
#else
	if (0)
#endif
	{
		if (in_mem(addr, size, memory_start, memory_end))
			return 1;
		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
			return 1;
# ifndef CONFIG_ROMFS_ON_MTD
		if (0)
# endif
		/* For XIP, allow user space to use pointers within the ROMFS. */
		if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
			return 1;
	} else {
		if (in_mem(addr, size, memory_start, physical_mem_end))
			return 1;
	}

	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
		return 1;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return 1;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return 1;
#endif

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
	if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
		return 1;
#endif

	aret = in_async(addr, size);
	if (aret < 2)
		return aret;

	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
		return 1;

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return 1;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return 1;

	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */