tile: support CONTEXT_TRACKING and thus NOHZ_FULL
[deliverable/linux.git] arch/tile/include/asm/thread_info.h
/*
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H

#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__

/*
 * Low level task data that assembly code needs immediate access to.
 * The structure is placed at the bottom of the supervisor stack.
 */
struct thread_info {
	struct task_struct *task;		/* main task structure */
	struct exec_domain *exec_domain;	/* execution domain */
	unsigned long flags;			/* low level flags */
	unsigned long status;			/* thread-synchronous flags */
	__u32 homecache_cpu;			/* CPU we are homecached on */
	__u32 cpu;				/* current CPU */
	int preempt_count;			/* 0 => preemptable,
						   <0 => BUG */

	mm_segment_t addr_limit;		/* thread address space
						   (KERNEL_DS or USER_DS) */
	struct single_step_state *step_state;	/* single step state
						   (if non-zero) */
	int align_ctl;				/* controls unaligned access */
#ifdef __tilegx__
	unsigned long unalign_jit_tmp[4];	/* temp r0..r3 storage */
	void __user *unalign_jit_base;		/* unalign fixup JIT base */
#endif
};

/*
 * macros/functions for gaining access to the thread information structure.
 */
#define INIT_THREAD_INFO(tsk) \
{ \
	.task		= &tsk, \
	.exec_domain	= &default_exec_domain, \
	.flags		= 0, \
	.cpu		= 0, \
	.preempt_count	= INIT_PREEMPT_COUNT, \
	.addr_limit	= KERNEL_DS, \
	.step_state	= NULL, \
	.align_ctl	= 0, \
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

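/*
 * init_thread_union is the statically allocated stack of the boot task;
 * the two macros above simply pick out its pieces.  For reference, in
 * kernels of this era the generic definition (from <linux/sched.h>) is
 * roughly:
 *
 *	union thread_union {
 *		struct thread_info thread_info;
 *		unsigned long stack[THREAD_SIZE/sizeof(long)];
 *	};
 *
 * so the thread_info sits at the bottom of the init stack, just as it
 * does for every other task.
 */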
#endif /* !__ASSEMBLY__ */

#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif
#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)

#define STACK_WARN (THREAD_SIZE/8)
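/*
 * Worked example of the sizing above: with 4 KB pages, PAGE_SHIFT is 12,
 * so THREAD_SIZE_ORDER is 13 - 12 = 1 and each kernel stack is two pages
 * (8 KB), with LOG2_THREAD_SIZE = 13 and STACK_WARN = 1 KB.  With 64 KB
 * pages, THREAD_SIZE_ORDER is 0 and the stack is a single 64 KB page.
 * Either way the stack is at least 8 KB and is naturally aligned to
 * THREAD_SIZE, which is what lets current_thread_info() below recover
 * the thread_info by masking the stack pointer.
 */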

#ifndef __ASSEMBLY__

void arch_release_thread_info(struct thread_info *info);

/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");

#define current_thread_info() \
	((struct thread_info *)(stack_pointer & -THREAD_SIZE))

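/*
 * Illustration of the mask above: THREAD_SIZE is a power of two, so
 * -THREAD_SIZE has the low LOG2_THREAD_SIZE bits clear.  With an 8 KB
 * stack, an sp of, say, 0xfc0019f8 gives
 *
 *	0xfc0019f8 & ~0x1fff == 0xfc000000
 *
 * i.e. the base of the stack, which is where struct thread_info lives.
 */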
/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);

/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
extern void _cpu_idle(void);

#else /* __ASSEMBLY__ */

/*
 * How to get the thread information struct from assembly.
 * Note that we use different macros since different architectures
 * have different semantics in their "mm" instruction and we would
 * like to guarantee that the macro expands to exactly one instruction.
 */
#ifdef __tilegx__
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif

#endif /* !__ASSEMBLY__ */

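/*
 * Both macros compute the same value as current_thread_info() does in C,
 * namely sp rounded down to a THREAD_SIZE boundary (sp & -THREAD_SIZE),
 * using a single "mm" instruction.  Note the calling conventions implied
 * by the names: on tilegx, EXTRACT_THREAD_INFO(reg) expects "reg" to
 * already hold the stack pointer and masks it in place, while on tilepro,
 * GET_THREAD_INFO(reg) reads sp directly.
 */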
/*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particularly since it makes
 * it easier to build constants in assembly.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SINGLESTEP		2	/* restore singlestep on return to
					   user mode */
#define TIF_ASYNC_TLB		3	/* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_MEMDIE		7	/* OOM killer at work */
#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */
#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_NOHZ		11	/* in adaptive nohz mode */

#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB		(1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_MEMDIE		(1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_NOHZ		(1<<TIF_NOHZ)

/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)

/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK \
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)

/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

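/*
 * Sketch of how these masks are consumed (illustrative only; the actual
 * callers live in the assembly return paths and arch/tile/kernel): on
 * return to user space the low flag bits are tested against
 * _TIF_ALLWORK_MASK, and if any are set the kernel drops into C code
 * that does roughly
 *
 *	if (thread_info_flags & _TIF_SIGPENDING)
 *		do_signal(regs);
 *	if (thread_info_flags & _TIF_NEED_RESCHED)
 *		schedule();
 *
 * while the _TIF_SYSCALL_*_WORK masks gate the slower tracing hooks
 * around each system call.
 */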
/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#ifdef __tilegx__
#define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
#endif
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
}
static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
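/*
 * These helpers implement the usual saved-sigmask protocol (sketch of
 * the generic usage, not tile-specific code): a sigsuspend-style
 * syscall stashes the caller's mask in current->saved_sigmask and calls
 * set_restore_sigmask(); once the signal has been handled, the signal
 * code does roughly
 *
 *	if (test_and_clear_restore_sigmask())
 *		set_current_blocked(&current->saved_sigmask);
 *
 * Keeping the bit in the thread-synchronous "status" word rather than
 * in "flags" avoids the need for atomic bit operations, as the comment
 * above notes.
 */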
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_THREAD_INFO_H */