Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __irq_h |
2 | #define __irq_h | |
3 | ||
4 | /* | |
5 | * Please do not include this file in generic code. There is currently | |
6 | * no requirement for any architecture to implement anything held | |
7 | * within this file. | |
8 | * | |
9 | * Thanks. --rmk | |
10 | */ | |
11 | ||
12 | #include <linux/config.h> | |
13 | ||
14 | #if !defined(CONFIG_ARCH_S390) | |
15 | ||
16 | #include <linux/linkage.h> | |
17 | #include <linux/cache.h> | |
18 | #include <linux/spinlock.h> | |
19 | #include <linux/cpumask.h> | |
20 | ||
21 | #include <asm/irq.h> | |
22 | #include <asm/ptrace.h> | |
23 | ||
/*
 * IRQ line status.
 *
 * Bit flags kept in irq_desc[].status (see struct irq_desc below);
 * several may be set at once to describe the line's current state.
 */
#define IRQ_INPROGRESS	1	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED	2	/* IRQ disabled - do not enter! */
#define IRQ_PENDING	4	/* IRQ pending - replay on enable */
#define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT	16	/* IRQ is being autodetected */
#define IRQ_WAITING	32	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL	64	/* IRQ level triggered */
#define IRQ_MASKED	128	/* IRQ masked - shouldn't be seen again */
#if defined(ARCH_HAS_IRQ_PER_CPU)
# define IRQ_PER_CPU	256	/* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
/* Architectures without per-CPU IRQs: the check is compile-time false. */
# define CHECK_IRQ_PER_CPU(var) 0
#endif
1da177e4 LT |
41 | |
/*
 * Interrupt controller descriptor. This is all we need
 * to describe about the low-level hardware.
 *
 * Each arch fills one of these per controller type; the generic IRQ
 * layer calls through the function pointers and never touches the
 * hardware directly.  Any pointer may be NULL-checked by callers
 * (see move_native_irq() testing set_affinity below).
 */
struct hw_interrupt_type {
	const char * typename;				/* controller name */
	unsigned int (*startup)(unsigned int irq);	/* enable line; return value is arch-defined */
	void (*shutdown)(unsigned int irq);		/* fully disable the line */
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
	void (*ack)(unsigned int irq);			/* acknowledge at the controller */
	void (*end)(unsigned int irq);			/* called when handling completes */
	void (*set_affinity)(unsigned int irq, cpumask_t dest);	/* re-target to CPUs in dest */
	/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void (*release)(unsigned int irq, void *dev_id);
#endif
};

/* Historical alias; both names are used throughout the tree. */
typedef struct hw_interrupt_type hw_irq_controller;
62 | ||
/*
 * This is the "IRQ descriptor", which contains various information
 * about the irq, including what kind of hardware handling it has,
 * whether it is disabled etc etc.
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
typedef struct irq_desc {
	hw_irq_controller *handler;	/* low-level controller ops for this line */
	void *handler_data;		/* controller-private cookie */
	struct irqaction *action;	/* IRQ action list */
	unsigned int status;		/* IRQ status (IRQ_* flags above) */
	unsigned int depth;		/* nested irq disables */
	unsigned int irq_count;		/* For detecting broken interrupts */
	unsigned int irqs_unhandled;	/* counter used with irq_count; see note_interrupt() */
	spinlock_t lock;		/* protects this descriptor (see set_pending_irq) */
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
	unsigned int move_irq;		/* Flag need to re-target intr dest*/
#endif
} ____cacheline_aligned irq_desc_t;

/* One descriptor per possible interrupt line, indexed by IRQ number. */
extern irq_desc_t irq_desc [NR_IRQS];
85 | ||
54d5d424 AR |
86 | /* Return a pointer to the irq descriptor for IRQ. */ |
87 | static inline irq_desc_t * | |
88 | irq_descp (int irq) | |
89 | { | |
90 | return irq_desc + irq; | |
91 | } | |
92 | ||
1da177e4 LT |
#include <asm/hw_irq.h> /* the arch dependent stuff */

/* Register an irqaction on @irq; defined by the generic IRQ core. */
extern int setup_irq(unsigned int irq, struct irqaction * new);

#ifdef CONFIG_GENERIC_HARDIRQS
/* Current CPU affinity mask of each interrupt line. */
extern cpumask_t irq_affinity[NR_IRQS];
54d5d424 AR |
99 | |
#ifdef CONFIG_SMP
/* SMP: record @mask as the affinity of @irq in the global table. */
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_affinity[irq] = mask;
}
#else
/* UP: a single CPU means affinity is meaningless - no-op. */
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
#endif
110 | ||
#ifdef CONFIG_SMP

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
/* Target masks for migrations deferred until the next interrupt. */
extern cpumask_t pending_irq_cpumask[NR_IRQS];

/*
 * Request that @irq be re-targeted to the CPUs in @mask.  The actual
 * migration is deferred: this only records the mask and raises
 * desc->move_irq, which move_native_irq() later acts on.
 */
static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	/* desc->lock serializes move_irq/pending mask against the mover. */
	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;
	pending_irq_cpumask[irq] = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}
126 | ||
/*
 * Perform an affinity change previously queued by set_pending_irq().
 * Clears the move_irq flag and pending mask; per the comment below,
 * the caller is expected to hold desc->lock.
 */
static inline void
move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	/* Common case: no migration requested. */
	if (likely (!desc->move_irq))
		return;

	desc->move_irq = 0;

	/* Request was raised but no target CPUs recorded - nothing to do. */
	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	/* Controller may not support re-targeting at all. */
	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq,tmp);
		desc->handler->enable(irq);
	}
	cpus_clear(pending_irq_cpumask[irq]);
}
163 | ||
#ifdef CONFIG_PCI_MSI
/*
 * Wonder why these are dummies?
 * For e.g the set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq()
 * counter part after translating the vector to irq info. We need to perform
 * this operation on the real irq, when we dont use vector, i.e when
 * pci_use_vector() is false.
 */
static inline void move_irq(int irq)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
}

#else /* CONFIG_PCI_MSI */

/* Non-MSI: operate on the irq number directly. */
static inline void move_irq(int irq)
{
	move_native_irq(irq);
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}
#endif /* CONFIG_PCI_MSI */

#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */

/* No deferred-migration support: migration hooks compile away. */
#define move_irq(x)
#define move_native_irq(x)
#define set_pending_irq(x,y)
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

/* UP: there is nowhere to move an interrupt to. */
#define move_irq(x)
#define move_native_irq(x)

#endif /* CONFIG_SMP */
211 | ||
1da177e4 LT |
212 | extern int no_irq_affinity; |
213 | extern int noirqdebug_setup(char *str); | |
214 | ||
215 | extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, | |
200803df | 216 | struct irqaction *action); |
1da177e4 | 217 | extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs); |
200803df AC |
218 | extern void note_interrupt(unsigned int irq, irq_desc_t *desc, |
219 | int action_ret, struct pt_regs *regs); | |
1da177e4 LT |
220 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
221 | ||
222 | extern void init_irq_proc(void); | |
223 | #endif | |
224 | ||
225 | extern hw_irq_controller no_irq_type; /* needed in every arch ? */ | |
226 | ||
227 | #endif | |
228 | ||
229 | #endif /* __irq_h */ |